remove unnecessary uint32_t casts of 0
[unleashed.git] / usr / src / uts / common / io / fibre-channel / fca / qlc / ql_api.c
blob5932f9ec57c77e67cd4b4f73317b48561ddb302b
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
22 /* Copyright 2010 QLogic Corporation */
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 * Copyright (c) 2016 by Delphix. All rights reserved.
33 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
35 * ***********************************************************************
36 * * **
37 * * NOTICE **
38 * * COPYRIGHT (C) 1996-2010 QLOGIC CORPORATION **
39 * * ALL RIGHTS RESERVED **
40 * * **
41 * ***********************************************************************
45 #include <ql_apps.h>
46 #include <ql_api.h>
47 #include <ql_debug.h>
48 #include <ql_init.h>
49 #include <ql_iocb.h>
50 #include <ql_ioctl.h>
51 #include <ql_isr.h>
52 #include <ql_mbx.h>
53 #include <ql_nx.h>
54 #include <ql_xioctl.h>
57 * Solaris external defines.
59 extern pri_t minclsyspri;
60 extern pri_t maxclsyspri;
63 * dev_ops functions prototypes
65 static int ql_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
66 static int ql_attach(dev_info_t *, ddi_attach_cmd_t);
67 static int ql_detach(dev_info_t *, ddi_detach_cmd_t);
68 static int ql_power(dev_info_t *, int, int);
69 static int ql_quiesce(dev_info_t *);
72 * FCA functions prototypes exported by means of the transport table
74 static opaque_t ql_bind_port(dev_info_t *, fc_fca_port_info_t *,
75 fc_fca_bind_info_t *);
76 static void ql_unbind_port(opaque_t);
77 static int ql_init_pkt(opaque_t, fc_packet_t *, int);
78 static int ql_un_init_pkt(opaque_t, fc_packet_t *);
79 static int ql_els_send(opaque_t, fc_packet_t *);
80 static int ql_get_cap(opaque_t, char *, void *);
81 static int ql_set_cap(opaque_t, char *, void *);
82 static int ql_getmap(opaque_t, fc_lilpmap_t *);
83 static int ql_transport(opaque_t, fc_packet_t *);
84 static int ql_ub_alloc(opaque_t, uint64_t *, uint32_t, uint32_t *, uint32_t);
85 static int ql_ub_free(opaque_t, uint32_t, uint64_t *);
86 static int ql_ub_release(opaque_t, uint32_t, uint64_t *);
87 static int ql_abort(opaque_t, fc_packet_t *, int);
88 static int ql_reset(opaque_t, uint32_t);
89 static int ql_port_manage(opaque_t, fc_fca_pm_t *);
90 static opaque_t ql_get_device(opaque_t, fc_portid_t);
93 * FCA Driver Support Function Prototypes.
95 static uint16_t ql_wait_outstanding(ql_adapter_state_t *);
96 static void ql_task_mgmt(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
97 ql_srb_t *);
98 static void ql_task_daemon(void *);
99 static void ql_task_thread(ql_adapter_state_t *);
100 static void ql_unsol_callback(ql_srb_t *);
101 static void ql_free_unsolicited_buffer(ql_adapter_state_t *,
102 fc_unsol_buf_t *);
103 static void ql_timer(void *);
104 static void ql_watchdog(ql_adapter_state_t *, uint32_t *, uint32_t *);
105 static void ql_cmd_timeout(ql_adapter_state_t *, ql_tgt_t *q, ql_srb_t *,
106 uint32_t *, uint32_t *);
107 static void ql_halt(ql_adapter_state_t *, int);
108 static int ql_els_plogi(ql_adapter_state_t *, fc_packet_t *);
109 static int ql_els_flogi(ql_adapter_state_t *, fc_packet_t *);
110 static int ql_els_logo(ql_adapter_state_t *, fc_packet_t *);
111 static int ql_els_prli(ql_adapter_state_t *, fc_packet_t *);
112 static int ql_els_prlo(ql_adapter_state_t *, fc_packet_t *);
113 static int ql_els_adisc(ql_adapter_state_t *, fc_packet_t *);
114 static int ql_els_linit(ql_adapter_state_t *, fc_packet_t *);
115 static int ql_els_lpc(ql_adapter_state_t *, fc_packet_t *);
116 static int ql_els_lsts(ql_adapter_state_t *, fc_packet_t *);
117 static int ql_els_scr(ql_adapter_state_t *, fc_packet_t *);
118 static int ql_els_rscn(ql_adapter_state_t *, fc_packet_t *);
119 static int ql_els_farp_req(ql_adapter_state_t *, fc_packet_t *);
120 static int ql_els_farp_reply(ql_adapter_state_t *, fc_packet_t *);
121 static int ql_els_rls(ql_adapter_state_t *, fc_packet_t *);
122 static int ql_els_rnid(ql_adapter_state_t *, fc_packet_t *);
123 static int ql_login_port(ql_adapter_state_t *, port_id_t);
124 static int ql_login_fabric_port(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
125 static int ql_logout_port(ql_adapter_state_t *, port_id_t);
126 static ql_lun_t *ql_lun_queue(ql_adapter_state_t *, ql_tgt_t *, uint16_t);
127 static int ql_fcp_scsi_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
128 static int ql_fcp_ip_cmd(ql_adapter_state_t *, fc_packet_t *, ql_srb_t *);
129 static int ql_fc_services(ql_adapter_state_t *, fc_packet_t *);
130 static int ql_poll_cmd(ql_adapter_state_t *, ql_srb_t *, time_t);
131 static int ql_start_cmd(ql_adapter_state_t *, ql_tgt_t *, fc_packet_t *,
132 ql_srb_t *);
133 static int ql_kstat_update(kstat_t *, int);
134 static ql_adapter_state_t *ql_fca_handle_to_state(opaque_t);
135 static ql_adapter_state_t *ql_cmd_setup(opaque_t, fc_packet_t *, int *);
136 static int ql_program_flash_address(ql_adapter_state_t *, uint32_t, uint8_t);
137 static void ql_rst_aen(ql_adapter_state_t *);
138 static void ql_restart_queues(ql_adapter_state_t *);
139 static void ql_abort_queues(ql_adapter_state_t *);
140 static void ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq);
141 static void ql_idle_check(ql_adapter_state_t *);
142 static int ql_loop_resync(ql_adapter_state_t *);
143 static size_t ql_24xx_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
144 static size_t ql_2581_ascii_fw_dump(ql_adapter_state_t *, caddr_t);
145 static int ql_save_config_regs(dev_info_t *);
146 static int ql_restore_config_regs(dev_info_t *);
147 static int ql_process_rscn(ql_adapter_state_t *, fc_affected_id_t *);
148 static int ql_handle_rscn_update(ql_adapter_state_t *);
149 static int ql_send_plogi(ql_adapter_state_t *, ql_tgt_t *, ql_head_t *);
150 static int ql_process_rscn_for_device(ql_adapter_state_t *, ql_tgt_t *);
151 static int ql_dump_firmware(ql_adapter_state_t *);
152 static int ql_process_logo_for_device(ql_adapter_state_t *, ql_tgt_t *);
153 static int ql_2200_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
154 static int ql_2300_binary_fw_dump(ql_adapter_state_t *, ql_fw_dump_t *);
155 static int ql_24xx_binary_fw_dump(ql_adapter_state_t *, ql_24xx_fw_dump_t *);
156 static int ql_25xx_binary_fw_dump(ql_adapter_state_t *, ql_25xx_fw_dump_t *);
157 static int ql_81xx_binary_fw_dump(ql_adapter_state_t *, ql_81xx_fw_dump_t *);
158 static int ql_read_risc_ram(ql_adapter_state_t *, uint32_t, uint32_t,
159 void *);
160 static void *ql_read_regs(ql_adapter_state_t *, void *, void *, uint32_t,
161 uint8_t);
162 static int ql_busy_plogi(ql_adapter_state_t *, fc_packet_t *, ql_tgt_t *);
163 static int ql_suspend_adapter(ql_adapter_state_t *);
164 static int ql_bstr_to_dec(char *, uint32_t *, uint32_t);
165 static void ql_update_rscn(ql_adapter_state_t *, fc_affected_id_t *);
166 int ql_alloc_dma_resouce(ql_adapter_state_t *, dma_mem_t *, int);
167 static int ql_bind_dma_buffer(ql_adapter_state_t *, dma_mem_t *, int);
168 static void ql_unbind_dma_buffer(ql_adapter_state_t *, dma_mem_t *);
169 static void ql_timeout_insert(ql_adapter_state_t *, ql_tgt_t *, ql_srb_t *);
170 static int ql_setup_interrupts(ql_adapter_state_t *);
171 static int ql_setup_msi(ql_adapter_state_t *);
172 static int ql_setup_msix(ql_adapter_state_t *);
173 static int ql_setup_fixed(ql_adapter_state_t *);
174 static void ql_release_intr(ql_adapter_state_t *);
175 static void ql_disable_intr(ql_adapter_state_t *);
176 static int ql_legacy_intr(ql_adapter_state_t *);
177 static int ql_init_mutex(ql_adapter_state_t *);
178 static void ql_destroy_mutex(ql_adapter_state_t *);
179 static void ql_iidma(ql_adapter_state_t *);
181 static int ql_n_port_plogi(ql_adapter_state_t *);
182 static void ql_fca_isp_els_request(ql_adapter_state_t *, fc_packet_t *,
183 els_descriptor_t *);
184 static void ql_isp_els_request_ctor(els_descriptor_t *,
185 els_passthru_entry_t *);
186 static int ql_p2p_plogi(ql_adapter_state_t *, fc_packet_t *);
187 static int ql_wait_for_td_stop(ql_adapter_state_t *);
188 static void ql_process_idc_event(ql_adapter_state_t *);
191 * Global data
193 static uint8_t ql_enable_pm = 1;
194 static int ql_flash_sbus_fpga = 0;
195 uint32_t ql_os_release_level = 11;
196 uint32_t ql_disable_aif = 0;
197 uint32_t ql_disable_msi = 0;
198 uint32_t ql_disable_msix = 0;
199 uint32_t ql_enable_ets = 0;
200 uint16_t ql_osc_wait_count = 1000;
202 /* Timer routine variables. */
203 static timeout_id_t ql_timer_timeout_id = NULL;
204 static clock_t ql_timer_ticks;
206 /* Soft state head pointer. */
207 void *ql_state = NULL;
209 /* Head adapter link. */
210 ql_head_t ql_hba = {
211 NULL,
212 NULL
215 /* Global hba index */
216 uint32_t ql_gfru_hba_index = 1;
219 * Some IP defines and globals
221 uint32_t ql_ip_buffer_count = 128;
222 uint32_t ql_ip_low_water = 10;
223 uint8_t ql_ip_fast_post_count = 5;
224 static int ql_ip_mtu = 65280; /* equivalent to FCIPMTU */
/* Device AL_PA to Device Head Queue index array. */
uint8_t ql_alpa_to_index[] = {
	0x7e, 0x7d, 0x7c, 0x00, 0x7b, 0x01, 0x02, 0x03, 0x7a, 0x04,
	0x05, 0x06, 0x07, 0x08, 0x09, 0x79, 0x78, 0x0a, 0x0b, 0x0c,
	0x0d, 0x0e, 0x0f, 0x77, 0x76, 0x10, 0x11, 0x75, 0x12, 0x74,
	0x73, 0x72, 0x13, 0x14, 0x15, 0x71, 0x16, 0x70, 0x6f, 0x6e,
	0x17, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x18, 0x19, 0x67,
	0x66, 0x65, 0x64, 0x63, 0x62, 0x20, 0x21, 0x61, 0x60, 0x23,
	0x5f, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x5e, 0x2a, 0x5d,
	0x5c, 0x5b, 0x2b, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x2c,
	0x2d, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x2e, 0x2f, 0x4e,
	0x4d, 0x30, 0x4c, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x4b,
	0x37, 0x4a, 0x49, 0x48, 0x38, 0x47, 0x46, 0x45, 0x44, 0x43,
	0x42, 0x39, 0x3a, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3b,
	0x3c, 0x3b, 0x3a, 0x3d, 0x39, 0x3e, 0x3f, 0x40, 0x38, 0x37,
	0x36, 0x41, 0x35, 0x42, 0x43, 0x44, 0x34, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4a, 0x33, 0x32, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
	0x50, 0x31, 0x30, 0x51, 0x52, 0x2f, 0x53, 0x2e, 0x2d, 0x2c,
	0x54, 0x55, 0x56, 0x2b, 0x57, 0x2a, 0x29, 0x28, 0x58, 0x27,
	0x26, 0x25, 0x24, 0x23, 0x22, 0x59, 0x5a, 0x21, 0x20, 0x1f,
	0x1e, 0x1d, 0x1c, 0x5b, 0x5c, 0x1b, 0x1a, 0x5d, 0x19, 0x5e,
	0x5f, 0x60, 0x61, 0x62, 0x63, 0x18, 0x64, 0x17, 0x16, 0x15,
	0x65, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x66, 0x67, 0x0e,
	0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x68, 0x69, 0x08, 0x07, 0x6a,
	0x06, 0x6b, 0x6c, 0x6d, 0x05, 0x04, 0x03, 0x6e, 0x02, 0x6f,
	0x70, 0x71, 0x01, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x00,
	0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7f, 0x80, 0x00, 0x01,
	0x02, 0x03, 0x80, 0x7f, 0x7e, 0x04
};
/* Device loop_id to ALPA array. */
static uint8_t ql_index_to_alpa[] = {
	0xef, 0xe8, 0xe4, 0xe2, 0xe1, 0xe0, 0xdc, 0xda, 0xd9, 0xd6,
	0xd5, 0xd4, 0xd3, 0xd2, 0xd1, 0xce, 0xcd, 0xcc, 0xcb, 0xca,
	0xc9, 0xc7, 0xc6, 0xc5, 0xc3, 0xbc, 0xba, 0xb9, 0xb6, 0xb5,
	0xb4, 0xb3, 0xb2, 0xb1, 0xae, 0xad, 0xac, 0xab, 0xaa, 0xa9,
	0xa7, 0xa6, 0xa5, 0xa3, 0x9f, 0x9e, 0x9d, 0x9b, 0x98, 0x97,
	0x90, 0x8f, 0x88, 0x84, 0x82, 0x81, 0x80, 0x7c, 0x7a, 0x79,
	0x76, 0x75, 0x74, 0x73, 0x72, 0x71, 0x6e, 0x6d, 0x6c, 0x6b,
	0x6a, 0x69, 0x67, 0x66, 0x65, 0x63, 0x5c, 0x5a, 0x59, 0x56,
	0x55, 0x54, 0x53, 0x52, 0x51, 0x4e, 0x4d, 0x4c, 0x4b, 0x4a,
	0x49, 0x47, 0x46, 0x45, 0x43, 0x3c, 0x3a, 0x39, 0x36, 0x35,
	0x34, 0x33, 0x32, 0x31, 0x2e, 0x2d, 0x2c, 0x2b, 0x2a, 0x29,
	0x27, 0x26, 0x25, 0x23, 0x1f, 0x1e, 0x1d, 0x1b, 0x18, 0x17,
	0x10, 0x0f, 0x08, 0x04, 0x02, 0x01
};
273 /* 2200 register offsets */
274 static reg_off_t reg_off_2200 = {
275 0x00, /* flash_address */
276 0x02, /* flash_data */
277 0x06, /* ctrl_status */
278 0x08, /* ictrl */
279 0x0a, /* istatus */
280 0x0c, /* semaphore */
281 0x0e, /* nvram */
282 0x18, /* req_in */
283 0x18, /* req_out */
284 0x1a, /* resp_in */
285 0x1a, /* resp_out */
286 0xff, /* risc2host - n/a */
287 24, /* Number of mailboxes */
289 /* Mailbox in register offsets 0 - 23 */
290 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
291 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
292 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
293 /* 2200 does not have mailbox 24-31 - n/a */
294 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
296 /* Mailbox out register offsets 0 - 23 */
297 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
298 0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee,
299 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
300 /* 2200 does not have mailbox 24-31 - n/a */
301 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
303 0x96, /* fpm_diag_config */
304 0xa4, /* pcr */
305 0xb0, /* mctr */
306 0xb8, /* fb_cmd */
307 0xc0, /* hccr */
308 0xcc, /* gpiod */
309 0xce, /* gpioe */
310 0xff, /* host_to_host_sema - n/a */
311 0xff, /* pri_req_in - n/a */
312 0xff, /* pri_req_out - n/a */
313 0xff, /* atio_req_in - n/a */
314 0xff, /* atio_req_out - n/a */
315 0xff, /* io_base_addr - n/a */
316 0xff, /* nx_host_int - n/a */
317 0xff /* nx_risc_int - n/a */
320 /* 2300 register offsets */
321 static reg_off_t reg_off_2300 = {
322 0x00, /* flash_address */
323 0x02, /* flash_data */
324 0x06, /* ctrl_status */
325 0x08, /* ictrl */
326 0x0a, /* istatus */
327 0x0c, /* semaphore */
328 0x0e, /* nvram */
329 0x10, /* req_in */
330 0x12, /* req_out */
331 0x14, /* resp_in */
332 0x16, /* resp_out */
333 0x18, /* risc2host */
334 32, /* Number of mailboxes */
336 /* Mailbox in register offsets 0 - 31 */
337 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
338 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
339 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
340 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
342 /* Mailbox out register offsets 0 - 31 */
343 0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e,
344 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
345 0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e,
346 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
348 0x96, /* fpm_diag_config */
349 0xa4, /* pcr */
350 0xb0, /* mctr */
351 0x80, /* fb_cmd */
352 0xc0, /* hccr */
353 0xcc, /* gpiod */
354 0xce, /* gpioe */
355 0x1c, /* host_to_host_sema */
356 0xff, /* pri_req_in - n/a */
357 0xff, /* pri_req_out - n/a */
358 0xff, /* atio_req_in - n/a */
359 0xff, /* atio_req_out - n/a */
360 0xff, /* io_base_addr - n/a */
361 0xff, /* nx_host_int - n/a */
362 0xff /* nx_risc_int - n/a */
365 /* 2400/2500 register offsets */
366 reg_off_t reg_off_2400_2500 = {
367 0x00, /* flash_address */
368 0x04, /* flash_data */
369 0x08, /* ctrl_status */
370 0x0c, /* ictrl */
371 0x10, /* istatus */
372 0xff, /* semaphore - n/a */
373 0xff, /* nvram - n/a */
374 0x1c, /* req_in */
375 0x20, /* req_out */
376 0x24, /* resp_in */
377 0x28, /* resp_out */
378 0x44, /* risc2host */
379 32, /* Number of mailboxes */
381 /* Mailbox in register offsets 0 - 31 */
382 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
383 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
384 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
385 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
387 /* Mailbox out register offsets 0 - 31 */
388 0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e,
389 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
390 0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae,
391 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
393 0xff, /* fpm_diag_config - n/a */
394 0xff, /* pcr - n/a */
395 0xff, /* mctr - n/a */
396 0xff, /* fb_cmd - n/a */
397 0x48, /* hccr */
398 0x4c, /* gpiod */
399 0x50, /* gpioe */
400 0xff, /* host_to_host_sema - n/a */
401 0x2c, /* pri_req_in */
402 0x30, /* pri_req_out */
403 0x3c, /* atio_req_in */
404 0x40, /* atio_req_out */
405 0x54, /* io_base_addr */
406 0xff, /* nx_host_int - n/a */
407 0xff /* nx_risc_int - n/a */
410 /* P3 register offsets */
411 static reg_off_t reg_off_8021 = {
412 0x00, /* flash_address */
413 0x04, /* flash_data */
414 0x08, /* ctrl_status */
415 0x0c, /* ictrl */
416 0x10, /* istatus */
417 0xff, /* semaphore - n/a */
418 0xff, /* nvram - n/a */
419 0xff, /* req_in - n/a */
420 0x0, /* req_out */
421 0x100, /* resp_in */
422 0x200, /* resp_out */
423 0x500, /* risc2host */
424 32, /* Number of mailboxes */
426 /* Mailbox in register offsets 0 - 31 */
427 0x300, 0x302, 0x304, 0x306, 0x308, 0x30a, 0x30c, 0x30e,
428 0x310, 0x312, 0x314, 0x316, 0x318, 0x31a, 0x31c, 0x31e,
429 0x320, 0x322, 0x324, 0x326, 0x328, 0x32a, 0x32c, 0x32e,
430 0x330, 0x332, 0x334, 0x336, 0x338, 0x33a, 0x33c, 0x33e,
432 /* Mailbox out register offsets 0 - 31 */
433 0x400, 0x402, 0x404, 0x406, 0x408, 0x40a, 0x40c, 0x40e,
434 0x410, 0x412, 0x414, 0x416, 0x418, 0x41a, 0x41c, 0x41e,
435 0x420, 0x422, 0x424, 0x426, 0x428, 0x42a, 0x42c, 0x42e,
436 0x430, 0x432, 0x434, 0x436, 0x438, 0x43a, 0x43c, 0x43e,
438 0xff, /* fpm_diag_config - n/a */
439 0xff, /* pcr - n/a */
440 0xff, /* mctr - n/a */
441 0xff, /* fb_cmd - n/a */
442 0x48, /* hccr */
443 0x4c, /* gpiod */
444 0x50, /* gpioe */
445 0xff, /* host_to_host_sema - n/a */
446 0x2c, /* pri_req_in */
447 0x30, /* pri_req_out */
448 0x3c, /* atio_req_in */
449 0x40, /* atio_req_out */
450 0x54, /* io_base_addr */
451 0x380, /* nx_host_int */
452 0x504 /* nx_risc_int */
455 /* mutex for protecting variables shared by all instances of the driver */
456 kmutex_t ql_global_mutex;
457 kmutex_t ql_global_hw_mutex;
458 kmutex_t ql_global_el_mutex;
460 /* DMA access attribute structure. */
461 static ddi_device_acc_attr_t ql_dev_acc_attr = {
462 DDI_DEVICE_ATTR_V0,
463 DDI_STRUCTURE_LE_ACC,
464 DDI_STRICTORDER_ACC
467 /* I/O DMA attributes structures. */
468 static ddi_dma_attr_t ql_64bit_io_dma_attr = {
469 DMA_ATTR_V0, /* dma_attr_version */
470 QL_DMA_LOW_ADDRESS, /* low DMA address range */
471 QL_DMA_HIGH_64BIT_ADDRESS, /* high DMA address range */
472 QL_DMA_XFER_COUNTER, /* DMA counter register */
473 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
474 QL_DMA_BURSTSIZES, /* DMA burstsizes */
475 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
476 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
477 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
478 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
479 QL_DMA_GRANULARITY, /* granularity of device */
480 QL_DMA_XFER_FLAGS /* DMA transfer flags */
483 static ddi_dma_attr_t ql_32bit_io_dma_attr = {
484 DMA_ATTR_V0, /* dma_attr_version */
485 QL_DMA_LOW_ADDRESS, /* low DMA address range */
486 QL_DMA_HIGH_32BIT_ADDRESS, /* high DMA address range */
487 QL_DMA_XFER_COUNTER, /* DMA counter register */
488 QL_DMA_ADDRESS_ALIGNMENT, /* DMA address alignment */
489 QL_DMA_BURSTSIZES, /* DMA burstsizes */
490 QL_DMA_MIN_XFER_SIZE, /* min effective DMA size */
491 QL_DMA_MAX_XFER_SIZE, /* max DMA xfer size */
492 QL_DMA_SEGMENT_BOUNDARY, /* segment boundary */
493 QL_DMA_SG_LIST_LENGTH, /* s/g list length */
494 QL_DMA_GRANULARITY, /* granularity of device */
495 QL_DMA_XFER_FLAGS /* DMA transfer flags */
498 /* Load the default dma attributes */
499 static ddi_dma_attr_t ql_32fcsm_cmd_dma_attr;
500 static ddi_dma_attr_t ql_64fcsm_cmd_dma_attr;
501 static ddi_dma_attr_t ql_32fcsm_rsp_dma_attr;
502 static ddi_dma_attr_t ql_64fcsm_rsp_dma_attr;
503 static ddi_dma_attr_t ql_32fcip_cmd_dma_attr;
504 static ddi_dma_attr_t ql_64fcip_cmd_dma_attr;
505 static ddi_dma_attr_t ql_32fcip_rsp_dma_attr;
506 static ddi_dma_attr_t ql_64fcip_rsp_dma_attr;
507 static ddi_dma_attr_t ql_32fcp_cmd_dma_attr;
508 static ddi_dma_attr_t ql_64fcp_cmd_dma_attr;
509 static ddi_dma_attr_t ql_32fcp_rsp_dma_attr;
510 static ddi_dma_attr_t ql_64fcp_rsp_dma_attr;
511 static ddi_dma_attr_t ql_32fcp_data_dma_attr;
512 static ddi_dma_attr_t ql_64fcp_data_dma_attr;
514 /* Static declarations of cb_ops entry point functions... */
515 static struct cb_ops ql_cb_ops = {
516 ql_open, /* b/c open */
517 ql_close, /* b/c close */
518 nodev, /* b strategy */
519 nodev, /* b print */
520 nodev, /* b dump */
521 nodev, /* c read */
522 nodev, /* c write */
523 ql_ioctl, /* c ioctl */
524 nodev, /* c devmap */
525 nodev, /* c mmap */
526 nodev, /* c segmap */
527 nochpoll, /* c poll */
528 nodev, /* cb_prop_op */
529 NULL, /* streamtab */
530 D_MP | D_NEW | D_HOTPLUG, /* Driver compatibility flag */
531 CB_REV, /* cb_ops revision */
532 nodev, /* c aread */
533 nodev /* c awrite */
536 /* Static declarations of dev_ops entry point functions... */
537 static struct dev_ops ql_devops = {
538 DEVO_REV, /* devo_rev */
539 0, /* refcnt */
540 ql_getinfo, /* getinfo */
541 nulldev, /* identify */
542 nulldev, /* probe */
543 ql_attach, /* attach */
544 ql_detach, /* detach */
545 nodev, /* reset */
546 &ql_cb_ops, /* char/block ops */
547 NULL, /* bus operations */
548 ql_power, /* power management */
549 ql_quiesce /* quiesce device */
552 /* ELS command code to text converter */
553 cmd_table_t els_cmd_tbl[] = ELS_CMD_TABLE();
554 /* Mailbox command code to text converter */
555 cmd_table_t mbox_cmd_tbl[] = MBOX_CMD_TABLE();
557 char qlc_driver_version[] = QL_VERSION;
560 * Loadable Driver Interface Structures.
561 * Declare and initialize the module configuration section...
563 static struct modldrv modldrv = {
564 &mod_driverops, /* type of module: driver */
565 "SunFC Qlogic FCA v" QL_VERSION, /* name of module */
566 &ql_devops /* driver dev_ops */
569 static struct modlinkage modlinkage = {
570 MODREV_1,
571 &modldrv,
572 NULL
575 /* ************************************************************************ */
576 /* Loadable Module Routines. */
577 /* ************************************************************************ */
580 * _init
581 * Initializes a loadable module. It is called before any other
582 * routine in a loadable module.
584 * Returns:
585 * 0 = success
587 * Context:
588 * Kernel context.
591 _init(void)
593 int rval = 0;
595 if (ql_os_release_level < 6) {
596 cmn_err(CE_WARN, "%s Unsupported OS release level = %d",
597 QL_NAME, ql_os_release_level);
598 rval = EINVAL;
600 if (ql_os_release_level == 6) {
601 ql_32bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
602 ql_64bit_io_dma_attr.dma_attr_count_max = 0x00ffffff;
605 if (rval == 0) {
606 rval = ddi_soft_state_init(&ql_state,
607 sizeof (ql_adapter_state_t), 0);
609 if (rval == 0) {
610 /* allow the FC Transport to tweak the dev_ops */
611 fc_fca_init(&ql_devops);
613 mutex_init(&ql_global_mutex, NULL, MUTEX_DRIVER, NULL);
614 mutex_init(&ql_global_hw_mutex, NULL, MUTEX_DRIVER, NULL);
615 mutex_init(&ql_global_el_mutex, NULL, MUTEX_DRIVER, NULL);
616 rval = mod_install(&modlinkage);
617 if (rval != 0) {
618 mutex_destroy(&ql_global_hw_mutex);
619 mutex_destroy(&ql_global_mutex);
620 mutex_destroy(&ql_global_el_mutex);
621 ddi_soft_state_fini(&ql_state);
622 } else {
623 /*EMPTY*/
624 ql_32fcsm_cmd_dma_attr = ql_32bit_io_dma_attr;
625 ql_64fcsm_cmd_dma_attr = ql_64bit_io_dma_attr;
626 ql_32fcsm_rsp_dma_attr = ql_32bit_io_dma_attr;
627 ql_64fcsm_rsp_dma_attr = ql_64bit_io_dma_attr;
628 ql_32fcip_cmd_dma_attr = ql_32bit_io_dma_attr;
629 ql_64fcip_cmd_dma_attr = ql_64bit_io_dma_attr;
630 ql_32fcip_rsp_dma_attr = ql_32bit_io_dma_attr;
631 ql_64fcip_rsp_dma_attr = ql_64bit_io_dma_attr;
632 ql_32fcp_cmd_dma_attr = ql_32bit_io_dma_attr;
633 ql_64fcp_cmd_dma_attr = ql_64bit_io_dma_attr;
634 ql_32fcp_rsp_dma_attr = ql_32bit_io_dma_attr;
635 ql_64fcp_rsp_dma_attr = ql_64bit_io_dma_attr;
636 ql_32fcp_data_dma_attr = ql_32bit_io_dma_attr;
637 ql_64fcp_data_dma_attr = ql_64bit_io_dma_attr;
638 ql_32fcsm_cmd_dma_attr.dma_attr_sgllen =
639 ql_64fcsm_cmd_dma_attr.dma_attr_sgllen =
640 QL_FCSM_CMD_SGLLEN;
641 ql_32fcsm_rsp_dma_attr.dma_attr_sgllen =
642 ql_64fcsm_rsp_dma_attr.dma_attr_sgllen =
643 QL_FCSM_RSP_SGLLEN;
644 ql_32fcip_cmd_dma_attr.dma_attr_sgllen =
645 ql_64fcip_cmd_dma_attr.dma_attr_sgllen =
646 QL_FCIP_CMD_SGLLEN;
647 ql_32fcip_rsp_dma_attr.dma_attr_sgllen =
648 ql_64fcip_rsp_dma_attr.dma_attr_sgllen =
649 QL_FCIP_RSP_SGLLEN;
650 ql_32fcp_cmd_dma_attr.dma_attr_sgllen =
651 ql_64fcp_cmd_dma_attr.dma_attr_sgllen =
652 QL_FCP_CMD_SGLLEN;
653 ql_32fcp_rsp_dma_attr.dma_attr_sgllen =
654 ql_64fcp_rsp_dma_attr.dma_attr_sgllen =
655 QL_FCP_RSP_SGLLEN;
659 if (rval != 0) {
660 cmn_err(CE_CONT, "?Unable to install/attach driver '%s'",
661 QL_NAME);
664 return (rval);
668 * _fini
669 * Prepares a module for unloading. It is called when the system
670 * wants to unload a module. If the module determines that it can
671 * be unloaded, then _fini() returns the value returned by
672 * mod_remove(). Upon successful return from _fini() no other
673 * routine in the module will be called before _init() is called.
675 * Returns:
676 * 0 = success
678 * Context:
679 * Kernel context.
682 _fini(void)
684 int rval;
686 rval = mod_remove(&modlinkage);
687 if (rval == 0) {
688 mutex_destroy(&ql_global_hw_mutex);
689 mutex_destroy(&ql_global_mutex);
690 mutex_destroy(&ql_global_el_mutex);
691 ddi_soft_state_fini(&ql_state);
694 return (rval);
698 * _info
699 * Returns information about loadable module.
701 * Input:
702 * modinfo = pointer to module information structure.
704 * Returns:
705 * Value returned by mod_info().
707 * Context:
708 * Kernel context.
711 _info(struct modinfo *modinfop)
713 return (mod_info(&modlinkage, modinfop));
716 /* ************************************************************************ */
717 /* dev_ops functions */
718 /* ************************************************************************ */
721 * ql_getinfo
722 * Returns the pointer associated with arg when cmd is
723 * set to DDI_INFO_DEVT2DEVINFO, or it should return the
724 * instance number associated with arg when cmd is set
725 * to DDI_INFO_DEV2INSTANCE.
727 * Input:
728 * dip = Do not use.
729 * cmd = command argument.
730 * arg = command specific argument.
731 * resultp = pointer to where request information is stored.
733 * Returns:
734 * DDI_SUCCESS or DDI_FAILURE.
736 * Context:
737 * Kernel context.
739 /* ARGSUSED */
740 static int
741 ql_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **resultp)
743 ql_adapter_state_t *ha;
744 int minor;
745 int rval = DDI_FAILURE;
747 minor = (int)(getminor((dev_t)arg));
748 ha = ddi_get_soft_state(ql_state, minor);
749 if (ha == NULL) {
750 QL_PRINT_2(CE_CONT, "failed, unknown minor=%d\n",
751 getminor((dev_t)arg));
752 *resultp = NULL;
753 return (rval);
756 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
758 switch (cmd) {
759 case DDI_INFO_DEVT2DEVINFO:
760 *resultp = ha->dip;
761 rval = DDI_SUCCESS;
762 break;
763 case DDI_INFO_DEVT2INSTANCE:
764 *resultp = (void *)(uintptr_t)(ha->instance);
765 rval = DDI_SUCCESS;
766 break;
767 default:
768 EL(ha, "failed, unsupported cmd=%d\n", cmd);
769 rval = DDI_FAILURE;
770 break;
773 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
775 return (rval);
779 * ql_attach
780 * Configure and attach an instance of the driver
781 * for a port.
783 * Input:
784 * dip = pointer to device information structure.
785 * cmd = attach type.
787 * Returns:
788 * DDI_SUCCESS or DDI_FAILURE.
790 * Context:
791 * Kernel context.
793 static int
794 ql_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
796 off_t regsize;
797 uint32_t size;
798 int rval, *ptr;
799 int instance;
800 uint_t progress = 0;
801 char *buf;
802 ushort_t caps_ptr, cap;
803 fc_fca_tran_t *tran;
804 ql_adapter_state_t *ha = NULL;
806 static char *pmcomps[] = {
807 NULL,
808 PM_LEVEL_D3_STR, /* Device OFF */
809 PM_LEVEL_D0_STR, /* Device ON */
812 QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n",
813 ddi_get_instance(dip), cmd);
815 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
817 switch (cmd) {
818 case DDI_ATTACH:
819 /* first get the instance */
820 instance = ddi_get_instance(dip);
822 cmn_err(CE_CONT, "!Qlogic %s(%d) FCA Driver v%s\n",
823 QL_NAME, instance, QL_VERSION);
825 /* Correct OS version? */
826 if (ql_os_release_level != 11) {
827 cmn_err(CE_WARN, "%s(%d): This driver is for Solaris "
828 "11", QL_NAME, instance);
829 goto attach_failed;
832 /* Hardware is installed in a DMA-capable slot? */
833 if (ddi_slaveonly(dip) == DDI_SUCCESS) {
834 cmn_err(CE_WARN, "%s(%d): slave only", QL_NAME,
835 instance);
836 goto attach_failed;
839 /* No support for high-level interrupts */
840 if (ddi_intr_hilevel(dip, 0) != 0) {
841 cmn_err(CE_WARN, "%s(%d): High level interrupt"
842 " not supported", QL_NAME, instance);
843 goto attach_failed;
846 /* Allocate our per-device-instance structure */
847 if (ddi_soft_state_zalloc(ql_state,
848 instance) != DDI_SUCCESS) {
849 cmn_err(CE_WARN, "%s(%d): soft state alloc failed",
850 QL_NAME, instance);
851 goto attach_failed;
853 progress |= QL_SOFT_STATE_ALLOCED;
855 ha = ddi_get_soft_state(ql_state, instance);
856 if (ha == NULL) {
857 cmn_err(CE_WARN, "%s(%d): can't get soft state",
858 QL_NAME, instance);
859 goto attach_failed;
861 ha->dip = dip;
862 ha->instance = instance;
863 ha->hba.base_address = ha;
864 ha->pha = ha;
866 if (ql_el_trace_desc_ctor(ha) != DDI_SUCCESS) {
867 cmn_err(CE_WARN, "%s(%d): can't setup el tracing",
868 QL_NAME, instance);
869 goto attach_failed;
872 /* Get extended logging and dump flags. */
873 ql_common_properties(ha);
875 if (strcmp(ddi_driver_name(ddi_get_parent(dip)),
876 "sbus") == 0) {
877 EL(ha, "%s SBUS card detected", QL_NAME);
878 ha->cfg_flags |= CFG_SBUS_CARD;
881 ha->dev = kmem_zalloc(sizeof (*ha->dev) *
882 DEVICE_HEAD_LIST_SIZE, KM_SLEEP);
884 ha->outstanding_cmds = kmem_zalloc(
885 sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS,
886 KM_SLEEP);
888 ha->ub_array = kmem_zalloc(sizeof (*ha->ub_array) *
889 QL_UB_LIMIT, KM_SLEEP);
891 ha->adapter_stats = kmem_zalloc(sizeof (*ha->adapter_stats),
892 KM_SLEEP);
894 (void) ddi_pathname(dip, buf);
895 ha->devpath = kmem_zalloc(strlen(buf)+1, KM_SLEEP);
896 if (ha->devpath == NULL) {
897 EL(ha, "devpath mem alloc failed\n");
898 } else {
899 (void) strcpy(ha->devpath, buf);
900 EL(ha, "devpath is: %s\n", ha->devpath);
903 if (CFG_IST(ha, CFG_SBUS_CARD)) {
905 * For cards where PCI is mapped to sbus e.g. Ivory.
907 * 0x00 : 0x000 - 0x0FF PCI Config Space for 2200
908 * : 0x100 - 0x3FF PCI IO space for 2200
909 * 0x01 : 0x000 - 0x0FF PCI Config Space for fpga
910 * : 0x100 - 0x3FF PCI IO Space for fpga
912 if (ddi_regs_map_setup(dip, 0, (caddr_t *)&ha->iobase,
913 0x100, 0x300, &ql_dev_acc_attr, &ha->dev_handle) !=
914 DDI_SUCCESS) {
915 cmn_err(CE_WARN, "%s(%d): Unable to map device"
916 " registers", QL_NAME, instance);
917 goto attach_failed;
919 if (ddi_regs_map_setup(dip, 1,
920 (caddr_t *)&ha->sbus_fpga_iobase, 0, 0x400,
921 &ql_dev_acc_attr, &ha->sbus_fpga_dev_handle) !=
922 DDI_SUCCESS) {
923 /* We should not fail attach here */
924 cmn_err(CE_WARN, "%s(%d): Unable to map FPGA",
925 QL_NAME, instance);
926 ha->sbus_fpga_iobase = NULL;
928 progress |= QL_REGS_MAPPED;
931 * We should map config space before adding interrupt
932 * So that the chip type (2200 or 2300) can be
933 * determined before the interrupt routine gets a
934 * chance to execute.
936 if (ddi_regs_map_setup(dip, 0,
937 (caddr_t *)&ha->sbus_config_base, 0, 0x100,
938 &ql_dev_acc_attr, &ha->sbus_config_handle) !=
939 DDI_SUCCESS) {
940 cmn_err(CE_WARN, "%s(%d): Unable to map sbus "
941 "config registers", QL_NAME, instance);
942 goto attach_failed;
944 progress |= QL_CONFIG_SPACE_SETUP;
945 } else {
946 /*LINTED [Solaris DDI_DEV_T_ANY Lint error]*/
947 rval = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip,
948 DDI_PROP_DONTPASS, "reg", &ptr, &size);
949 if (rval != DDI_PROP_SUCCESS) {
950 cmn_err(CE_WARN, "%s(%d): Unable to get PCI "
951 "address registers", QL_NAME, instance);
952 goto attach_failed;
953 } else {
954 ha->pci_bus_addr = ptr[0];
955 ha->function_number = (uint8_t)
956 (ha->pci_bus_addr >> 8 & 7);
957 ddi_prop_free(ptr);
961 * We should map config space before adding interrupt
962 * So that the chip type (2200 or 2300) can be
963 * determined before the interrupt routine gets a
964 * chance to execute.
966 if (pci_config_setup(ha->dip, &ha->pci_handle) !=
967 DDI_SUCCESS) {
968 cmn_err(CE_WARN, "%s(%d): can't setup PCI "
969 "config space", QL_NAME, instance);
970 goto attach_failed;
972 progress |= QL_CONFIG_SPACE_SETUP;
975 * Setup the ISP2200 registers address mapping to be
976 * accessed by this particular driver.
977 * 0x0 Configuration Space
978 * 0x1 I/O Space
979 * 0x2 32-bit Memory Space address
980 * 0x3 64-bit Memory Space address
982 size = ql_pci_config_get32(ha, PCI_CONF_BASE0) & BIT_0 ?
983 2 : 1;
984 if (ddi_dev_regsize(dip, size, &regsize) !=
985 DDI_SUCCESS ||
986 ddi_regs_map_setup(dip, size, &ha->iobase,
987 0, regsize, &ql_dev_acc_attr, &ha->dev_handle) !=
988 DDI_SUCCESS) {
989 cmn_err(CE_WARN, "%s(%d): regs_map_setup(mem) "
990 "failed", QL_NAME, instance);
991 goto attach_failed;
993 progress |= QL_REGS_MAPPED;
996 * We need I/O space mappings for 23xx HBAs for
997 * loading flash (FCode). The chip has a bug due to
998 * which loading flash fails through mem space
999 * mappings in PCI-X mode.
1001 if (size == 1) {
1002 ha->iomap_iobase = ha->iobase;
1003 ha->iomap_dev_handle = ha->dev_handle;
1004 } else {
1005 if (ddi_dev_regsize(dip, 1, &regsize) !=
1006 DDI_SUCCESS ||
1007 ddi_regs_map_setup(dip, 1,
1008 &ha->iomap_iobase, 0, regsize,
1009 &ql_dev_acc_attr, &ha->iomap_dev_handle) !=
1010 DDI_SUCCESS) {
1011 cmn_err(CE_WARN, "%s(%d): regs_map_"
1012 "setup(I/O) failed", QL_NAME,
1013 instance);
1014 goto attach_failed;
1016 progress |= QL_IOMAP_IOBASE_MAPPED;
1020 ha->subsys_id = (uint16_t)ql_pci_config_get16(ha,
1021 PCI_CONF_SUBSYSID);
1022 ha->subven_id = (uint16_t)ql_pci_config_get16(ha,
1023 PCI_CONF_SUBVENID);
1024 ha->ven_id = (uint16_t)ql_pci_config_get16(ha,
1025 PCI_CONF_VENID);
1026 ha->device_id = (uint16_t)ql_pci_config_get16(ha,
1027 PCI_CONF_DEVID);
1028 ha->rev_id = (uint8_t)ql_pci_config_get8(ha,
1029 PCI_CONF_REVID);
1031 EL(ha, "ISP%x chip detected (RevID=%x, VenID=%x, SVenID=%x, "
1032 "SSysID=%x)\n", ha->device_id, ha->rev_id, ha->ven_id,
1033 ha->subven_id, ha->subsys_id);
1035 switch (ha->device_id) {
1036 case 0x2300:
1037 case 0x2312:
1038 case 0x2322:
1039 case 0x6312:
1040 case 0x6322:
1041 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1042 ha->flags |= FUNCTION_1;
1044 if ((ha->device_id == 0x6322) ||
1045 (ha->device_id == 0x2322)) {
1046 ha->cfg_flags |= CFG_CTRL_6322;
1047 ha->fw_class = 0x6322;
1048 ha->risc_dump_size = QL_6322_FW_DUMP_SIZE;
1049 } else {
1050 ha->cfg_flags |= CFG_CTRL_2300;
1051 ha->fw_class = 0x2300;
1052 ha->risc_dump_size = QL_2300_FW_DUMP_SIZE;
1054 ha->reg_off = &reg_off_2300;
1055 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1056 goto attach_failed;
1058 ha->fcp_cmd = ql_command_iocb;
1059 ha->ip_cmd = ql_ip_iocb;
1060 ha->ms_cmd = ql_ms_iocb;
1061 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1062 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1063 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1064 } else {
1065 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1066 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1068 break;
1070 case 0x2200:
1071 ha->cfg_flags |= CFG_CTRL_2200;
1072 ha->reg_off = &reg_off_2200;
1073 ha->fw_class = 0x2200;
1074 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1075 goto attach_failed;
1077 ha->risc_dump_size = QL_2200_FW_DUMP_SIZE;
1078 ha->fcp_cmd = ql_command_iocb;
1079 ha->ip_cmd = ql_ip_iocb;
1080 ha->ms_cmd = ql_ms_iocb;
1081 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1082 ha->cmd_segs = CMD_TYPE_2_DATA_SEGMENTS;
1083 ha->cmd_cont_segs = CONT_TYPE_0_DATA_SEGMENTS;
1084 } else {
1085 ha->cmd_segs = CMD_TYPE_3_DATA_SEGMENTS;
1086 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1088 break;
1090 case 0x2422:
1091 case 0x2432:
1092 case 0x5422:
1093 case 0x5432:
1094 case 0x8432:
1095 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1096 ha->flags |= FUNCTION_1;
1098 ha->cfg_flags |= CFG_CTRL_2422;
1099 if (ha->device_id == 0x8432) {
1100 ha->cfg_flags |= CFG_CTRL_MENLO;
1101 } else {
1102 ha->flags |= VP_ENABLED;
1105 ha->reg_off = &reg_off_2400_2500;
1106 ha->fw_class = 0x2400;
1107 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1108 goto attach_failed;
1110 ha->risc_dump_size = QL_24XX_FW_DUMP_SIZE;
1111 ha->fcp_cmd = ql_command_24xx_iocb;
1112 ha->ip_cmd = ql_ip_24xx_iocb;
1113 ha->ms_cmd = ql_ms_24xx_iocb;
1114 ha->els_cmd = ql_els_24xx_iocb;
1115 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1116 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1117 break;
1119 case 0x2522:
1120 case 0x2532:
1121 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 2) {
1122 ha->flags |= FUNCTION_1;
1124 ha->cfg_flags |= CFG_CTRL_25XX;
1125 ha->flags |= VP_ENABLED;
1126 ha->fw_class = 0x2500;
1127 ha->reg_off = &reg_off_2400_2500;
1128 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1129 goto attach_failed;
1131 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1132 ha->fcp_cmd = ql_command_24xx_iocb;
1133 ha->ip_cmd = ql_ip_24xx_iocb;
1134 ha->ms_cmd = ql_ms_24xx_iocb;
1135 ha->els_cmd = ql_els_24xx_iocb;
1136 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1137 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1138 break;
1140 case 0x8001:
1141 if (ql_pci_config_get8(ha, PCI_CONF_IPIN) == 4) {
1142 ha->flags |= FUNCTION_1;
1144 ha->cfg_flags |= CFG_CTRL_81XX;
1145 ha->flags |= VP_ENABLED;
1146 ha->fw_class = 0x8100;
1147 ha->reg_off = &reg_off_2400_2500;
1148 if (ql_fwmodule_resolve(ha) != QL_SUCCESS) {
1149 goto attach_failed;
1151 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1152 ha->fcp_cmd = ql_command_24xx_iocb;
1153 ha->ip_cmd = ql_ip_24xx_iocb;
1154 ha->ms_cmd = ql_ms_24xx_iocb;
1155 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1156 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1157 break;
1159 case 0x8021:
1160 if (ha->function_number & BIT_0) {
1161 ha->flags |= FUNCTION_1;
1163 ha->cfg_flags |= CFG_CTRL_8021;
1164 ha->reg_off = &reg_off_8021;
1165 ha->risc_dump_size = QL_25XX_FW_DUMP_SIZE;
1166 ha->fcp_cmd = ql_command_24xx_iocb;
1167 ha->ms_cmd = ql_ms_24xx_iocb;
1168 ha->cmd_segs = CMD_TYPE_7_DATA_SEGMENTS;
1169 ha->cmd_cont_segs = CONT_TYPE_1_DATA_SEGMENTS;
1171 ha->nx_pcibase = ha->iobase;
1172 ha->iobase += 0xBC000 + (ha->function_number << 11);
1173 ha->iomap_iobase += 0xBC000 +
1174 (ha->function_number << 11);
1176 /* map doorbell */
1177 if (ddi_dev_regsize(dip, 2, &regsize) != DDI_SUCCESS ||
1178 ddi_regs_map_setup(dip, 2, &ha->db_iobase,
1179 0, regsize, &ql_dev_acc_attr, &ha->db_dev_handle) !=
1180 DDI_SUCCESS) {
1181 cmn_err(CE_WARN, "%s(%d): regs_map_setup"
1182 "(doorbell) failed", QL_NAME, instance);
1183 goto attach_failed;
1185 progress |= QL_DB_IOBASE_MAPPED;
1187 ha->nx_req_in = (uint32_t *)(ha->db_iobase +
1188 (ha->function_number << 12));
1189 ha->db_read = ha->nx_pcibase + (512 * 1024) +
1190 (ha->function_number * 8);
1192 ql_8021_update_crb_int_ptr(ha);
1193 ql_8021_set_drv_active(ha);
1194 break;
1196 default:
1197 cmn_err(CE_WARN, "%s(%d): Unsupported device id: %x",
1198 QL_NAME, instance, ha->device_id);
1199 goto attach_failed;
1202 /* Setup hba buffer. */
1204 size = CFG_IST(ha, CFG_CTRL_24258081) ?
1205 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE) :
1206 (REQUEST_QUEUE_SIZE + RESPONSE_QUEUE_SIZE +
1207 RCVBUF_QUEUE_SIZE);
1209 if (ql_get_dma_mem(ha, &ha->hba_buf, size, LITTLE_ENDIAN_DMA,
1210 QL_DMA_RING_ALIGN) != QL_SUCCESS) {
1211 cmn_err(CE_WARN, "%s(%d): request queue DMA memory "
1212 "alloc failed", QL_NAME, instance);
1213 goto attach_failed;
1215 progress |= QL_HBA_BUFFER_SETUP;
1217 /* Setup buffer pointers. */
1218 ha->request_dvma = ha->hba_buf.cookie.dmac_laddress +
1219 REQUEST_Q_BUFFER_OFFSET;
1220 ha->request_ring_bp = (struct cmd_entry *)
1221 ((caddr_t)ha->hba_buf.bp + REQUEST_Q_BUFFER_OFFSET);
1223 ha->response_dvma = ha->hba_buf.cookie.dmac_laddress +
1224 RESPONSE_Q_BUFFER_OFFSET;
1225 ha->response_ring_bp = (struct sts_entry *)
1226 ((caddr_t)ha->hba_buf.bp + RESPONSE_Q_BUFFER_OFFSET);
1228 ha->rcvbuf_dvma = ha->hba_buf.cookie.dmac_laddress +
1229 RCVBUF_Q_BUFFER_OFFSET;
1230 ha->rcvbuf_ring_bp = (struct rcvbuf *)
1231 ((caddr_t)ha->hba_buf.bp + RCVBUF_Q_BUFFER_OFFSET);
1233 /* Allocate resource for QLogic IOCTL */
1234 (void) ql_alloc_xioctl_resource(ha);
1236 /* Setup interrupts */
1237 if ((rval = ql_setup_interrupts(ha)) != DDI_SUCCESS) {
1238 cmn_err(CE_WARN, "%s(%d): Failed to add interrupt, "
1239 "rval=%xh", QL_NAME, instance, rval);
1240 goto attach_failed;
1243 progress |= (QL_INTR_ADDED | QL_MUTEX_CV_INITED);
1245 if (ql_nvram_cache_desc_ctor(ha) != DDI_SUCCESS) {
1246 cmn_err(CE_WARN, "%s(%d): can't setup nvram cache",
1247 QL_NAME, instance);
1248 goto attach_failed;
1252 * Allocate an N Port information structure
1253 * for use when in P2P topology.
1255 ha->n_port = (ql_n_port_info_t *)
1256 kmem_zalloc(sizeof (ql_n_port_info_t), KM_SLEEP);
1257 if (ha->n_port == NULL) {
1258 cmn_err(CE_WARN, "%s(%d): Failed to create N Port info",
1259 QL_NAME, instance);
1260 goto attach_failed;
1263 progress |= QL_N_PORT_INFO_CREATED;
1266 * Determine support for Power Management
1268 caps_ptr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR);
1270 while (caps_ptr != PCI_CAP_NEXT_PTR_NULL) {
1271 cap = (uint8_t)ql_pci_config_get8(ha, caps_ptr);
1272 if (cap == PCI_CAP_ID_PM) {
1273 ha->pm_capable = 1;
1274 break;
1276 caps_ptr = (uint8_t)ql_pci_config_get8(ha, caps_ptr +
1277 PCI_CAP_NEXT_PTR);
1280 if (ha->pm_capable) {
1282 * Enable PM for 2200 based HBAs only.
1284 if (ha->device_id != 0x2200) {
1285 ha->pm_capable = 0;
1289 if (ha->pm_capable) {
1290 ha->pm_capable = ql_enable_pm;
1293 if (ha->pm_capable) {
1295 * Initialize power management bookkeeping;
1296 * components are created idle.
1298 (void) sprintf(buf, "NAME=%s(%d)", QL_NAME, instance);
1299 pmcomps[0] = buf;
1301 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
1302 if (ddi_prop_update_string_array(DDI_DEV_T_NONE,
1303 dip, "pm-components", pmcomps,
1304 sizeof (pmcomps) / sizeof (pmcomps[0])) !=
1305 DDI_PROP_SUCCESS) {
1306 cmn_err(CE_WARN, "%s(%d): failed to create"
1307 " pm-components property", QL_NAME,
1308 instance);
1310 /* Initialize adapter. */
1311 ha->power_level = PM_LEVEL_D0;
1312 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1313 cmn_err(CE_WARN, "%s(%d): failed to"
1314 " initialize adapter", QL_NAME,
1315 instance);
1316 goto attach_failed;
1318 } else {
1319 ha->power_level = PM_LEVEL_D3;
1320 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1321 PM_LEVEL_D0) != DDI_SUCCESS) {
1322 cmn_err(CE_WARN, "%s(%d): failed to"
1323 " raise power or initialize"
1324 " adapter", QL_NAME, instance);
1327 } else {
1328 /* Initialize adapter. */
1329 ha->power_level = PM_LEVEL_D0;
1330 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1331 cmn_err(CE_WARN, "%s(%d): failed to initialize"
1332 " adapter", QL_NAME, instance);
1336 if (ha->fw_major_version == 0 && ha->fw_minor_version == 0 &&
1337 ha->fw_subminor_version == 0) {
1338 cmn_err(CE_NOTE, "!%s(%d): Firmware not loaded",
1339 QL_NAME, ha->instance);
1340 } else {
1341 int rval;
1342 char ver_fmt[256];
1344 rval = (int)snprintf(ver_fmt, (size_t)sizeof (ver_fmt),
1345 "Firmware version %d.%d.%d", ha->fw_major_version,
1346 ha->fw_minor_version, ha->fw_subminor_version);
1348 if (CFG_IST(ha, CFG_CTRL_81XX)) {
1349 rval = (int)snprintf(ver_fmt + rval,
1350 (size_t)sizeof (ver_fmt),
1351 ", MPI fw version %d.%d.%d",
1352 ha->mpi_fw_major_version,
1353 ha->mpi_fw_minor_version,
1354 ha->mpi_fw_subminor_version);
1356 if (ha->subsys_id == 0x17B ||
1357 ha->subsys_id == 0x17D) {
1358 (void) snprintf(ver_fmt + rval,
1359 (size_t)sizeof (ver_fmt),
1360 ", PHY fw version %d.%d.%d",
1361 ha->phy_fw_major_version,
1362 ha->phy_fw_minor_version,
1363 ha->phy_fw_subminor_version);
1366 cmn_err(CE_NOTE, "!%s(%d): %s",
1367 QL_NAME, ha->instance, ver_fmt);
1370 ha->k_stats = kstat_create(QL_NAME, instance, "statistics",
1371 "controller", KSTAT_TYPE_RAW,
1372 (uint32_t)sizeof (ql_adapter_stat_t), KSTAT_FLAG_VIRTUAL);
1373 if (ha->k_stats == NULL) {
1374 cmn_err(CE_WARN, "%s(%d): Failed to create kstat",
1375 QL_NAME, instance);
1376 goto attach_failed;
1378 progress |= QL_KSTAT_CREATED;
1380 ha->adapter_stats->version = 1;
1381 ha->k_stats->ks_data = (void *)ha->adapter_stats;
1382 ha->k_stats->ks_private = ha;
1383 ha->k_stats->ks_update = ql_kstat_update;
1384 ha->k_stats->ks_ndata = 1;
1385 ha->k_stats->ks_data_size = sizeof (ql_adapter_stat_t);
1386 kstat_install(ha->k_stats);
1388 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
1389 instance, DDI_NT_NEXUS, 0) != DDI_SUCCESS) {
1390 cmn_err(CE_WARN, "%s(%d): failed to create minor node",
1391 QL_NAME, instance);
1392 goto attach_failed;
1394 progress |= QL_MINOR_NODE_CREATED;
1396 /* Allocate a transport structure for this instance */
1397 tran = kmem_zalloc(sizeof (fc_fca_tran_t), KM_SLEEP);
1398 if (tran == NULL) {
1399 cmn_err(CE_WARN, "%s(%d): failed to allocate transport",
1400 QL_NAME, instance);
1401 goto attach_failed;
1404 progress |= QL_FCA_TRAN_ALLOCED;
1406 /* fill in the structure */
1407 tran->fca_numports = 1;
1408 tran->fca_version = FCTL_FCA_MODREV_5;
1409 if (CFG_IST(ha, CFG_CTRL_2422)) {
1410 tran->fca_num_npivports = MAX_24_VIRTUAL_PORTS;
1411 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
1412 tran->fca_num_npivports = MAX_25_VIRTUAL_PORTS;
1414 bcopy(ha->loginparams.node_ww_name.raw_wwn,
1415 tran->fca_perm_pwwn.raw_wwn, 8);
1417 EL(ha, "FCA version %d\n", tran->fca_version);
1419 /* Specify the amount of space needed in each packet */
1420 tran->fca_pkt_size = sizeof (ql_srb_t);
1422 /* command limits are usually dictated by hardware */
1423 tran->fca_cmd_max = MAX_OUTSTANDING_COMMANDS;
1425 /* dmaattr are static, set elsewhere. */
1426 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1427 tran->fca_dma_attr = &ql_64bit_io_dma_attr;
1428 tran->fca_dma_fcp_cmd_attr = &ql_64fcp_cmd_dma_attr;
1429 tran->fca_dma_fcp_rsp_attr = &ql_64fcp_rsp_dma_attr;
1430 tran->fca_dma_fcp_data_attr = &ql_64fcp_data_dma_attr;
1431 tran->fca_dma_fcsm_cmd_attr = &ql_64fcsm_cmd_dma_attr;
1432 tran->fca_dma_fcsm_rsp_attr = &ql_64fcsm_rsp_dma_attr;
1433 tran->fca_dma_fcip_cmd_attr = &ql_64fcip_cmd_dma_attr;
1434 tran->fca_dma_fcip_rsp_attr = &ql_64fcip_rsp_dma_attr;
1435 } else {
1436 tran->fca_dma_attr = &ql_32bit_io_dma_attr;
1437 tran->fca_dma_fcp_cmd_attr = &ql_32fcp_cmd_dma_attr;
1438 tran->fca_dma_fcp_rsp_attr = &ql_32fcp_rsp_dma_attr;
1439 tran->fca_dma_fcp_data_attr = &ql_32fcp_data_dma_attr;
1440 tran->fca_dma_fcsm_cmd_attr = &ql_32fcsm_cmd_dma_attr;
1441 tran->fca_dma_fcsm_rsp_attr = &ql_32fcsm_rsp_dma_attr;
1442 tran->fca_dma_fcip_cmd_attr = &ql_32fcip_cmd_dma_attr;
1443 tran->fca_dma_fcip_rsp_attr = &ql_32fcip_rsp_dma_attr;
1446 tran->fca_acc_attr = &ql_dev_acc_attr;
1447 tran->fca_iblock = &(ha->iblock_cookie);
1449 /* the remaining values are simply function vectors */
1450 tran->fca_bind_port = ql_bind_port;
1451 tran->fca_unbind_port = ql_unbind_port;
1452 tran->fca_init_pkt = ql_init_pkt;
1453 tran->fca_un_init_pkt = ql_un_init_pkt;
1454 tran->fca_els_send = ql_els_send;
1455 tran->fca_get_cap = ql_get_cap;
1456 tran->fca_set_cap = ql_set_cap;
1457 tran->fca_getmap = ql_getmap;
1458 tran->fca_transport = ql_transport;
1459 tran->fca_ub_alloc = ql_ub_alloc;
1460 tran->fca_ub_free = ql_ub_free;
1461 tran->fca_ub_release = ql_ub_release;
1462 tran->fca_abort = ql_abort;
1463 tran->fca_reset = ql_reset;
1464 tran->fca_port_manage = ql_port_manage;
1465 tran->fca_get_device = ql_get_device;
1467 /* give it to the FC transport */
1468 if (fc_fca_attach(dip, tran) != DDI_SUCCESS) {
1469 cmn_err(CE_WARN, "%s(%d): FCA attach failed", QL_NAME,
1470 instance);
1471 goto attach_failed;
1473 progress |= QL_FCA_ATTACH_DONE;
1475 /* Stash the structure so it can be freed at detach */
1476 ha->tran = tran;
1478 /* Acquire global state lock. */
1479 GLOBAL_STATE_LOCK();
1481 /* Add adapter structure to link list. */
1482 ql_add_link_b(&ql_hba, &ha->hba);
1484 /* Start one second driver timer. */
1485 if (ql_timer_timeout_id == NULL) {
1486 ql_timer_ticks = drv_usectohz(1000000);
1487 ql_timer_timeout_id = timeout(ql_timer, NULL,
1488 ql_timer_ticks);
1491 /* Release global state lock. */
1492 GLOBAL_STATE_UNLOCK();
1494 /* Determine and populate HBA fru info */
1495 ql_setup_fruinfo(ha);
1497 /* Setup task_daemon thread. */
1498 (void) thread_create(NULL, 0, (void (*)())ql_task_daemon, ha,
1499 0, &p0, TS_RUN, minclsyspri);
1501 progress |= QL_TASK_DAEMON_STARTED;
1503 ddi_report_dev(dip);
1505 /* Disable link reset in panic path */
1506 ha->lip_on_panic = 1;
1508 rval = DDI_SUCCESS;
1509 break;
1511 attach_failed:
1512 if (progress & QL_FCA_ATTACH_DONE) {
1513 (void) fc_fca_detach(dip);
1514 progress &= ~QL_FCA_ATTACH_DONE;
1517 if (progress & QL_FCA_TRAN_ALLOCED) {
1518 kmem_free(tran, sizeof (fc_fca_tran_t));
1519 progress &= ~QL_FCA_TRAN_ALLOCED;
1522 if (progress & QL_MINOR_NODE_CREATED) {
1523 ddi_remove_minor_node(dip, "devctl");
1524 progress &= ~QL_MINOR_NODE_CREATED;
1527 if (progress & QL_KSTAT_CREATED) {
1528 kstat_delete(ha->k_stats);
1529 progress &= ~QL_KSTAT_CREATED;
1532 if (progress & QL_N_PORT_INFO_CREATED) {
1533 kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
1534 progress &= ~QL_N_PORT_INFO_CREATED;
1537 if (progress & QL_TASK_DAEMON_STARTED) {
1538 TASK_DAEMON_LOCK(ha);
1540 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
1542 cv_signal(&ha->cv_task_daemon);
1544 /* Release task daemon lock. */
1545 TASK_DAEMON_UNLOCK(ha);
		/* Wait for task daemon to stop running. */
1548 while (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
1549 ql_delay(ha, 10000);
1551 progress &= ~QL_TASK_DAEMON_STARTED;
1554 if (progress & QL_DB_IOBASE_MAPPED) {
1555 ql_8021_clr_drv_active(ha);
1556 ddi_regs_map_free(&ha->db_dev_handle);
1557 progress &= ~QL_DB_IOBASE_MAPPED;
1559 if (progress & QL_IOMAP_IOBASE_MAPPED) {
1560 ddi_regs_map_free(&ha->iomap_dev_handle);
1561 progress &= ~QL_IOMAP_IOBASE_MAPPED;
1564 if (progress & QL_CONFIG_SPACE_SETUP) {
1565 if (CFG_IST(ha, CFG_SBUS_CARD)) {
1566 ddi_regs_map_free(&ha->sbus_config_handle);
1567 } else {
1568 pci_config_teardown(&ha->pci_handle);
1570 progress &= ~QL_CONFIG_SPACE_SETUP;
1573 if (progress & QL_INTR_ADDED) {
1574 ql_disable_intr(ha);
1575 ql_release_intr(ha);
1576 progress &= ~QL_INTR_ADDED;
1579 if (progress & QL_MUTEX_CV_INITED) {
1580 ql_destroy_mutex(ha);
1581 progress &= ~QL_MUTEX_CV_INITED;
1584 if (progress & QL_HBA_BUFFER_SETUP) {
1585 ql_free_phys(ha, &ha->hba_buf);
1586 progress &= ~QL_HBA_BUFFER_SETUP;
1589 if (progress & QL_REGS_MAPPED) {
1590 ddi_regs_map_free(&ha->dev_handle);
1591 if (ha->sbus_fpga_iobase != NULL) {
1592 ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
1594 progress &= ~QL_REGS_MAPPED;
1597 if (progress & QL_SOFT_STATE_ALLOCED) {
1599 ql_fcache_rel(ha->fcache);
1601 kmem_free(ha->adapter_stats,
1602 sizeof (*ha->adapter_stats));
1604 kmem_free(ha->ub_array, sizeof (*ha->ub_array) *
1605 QL_UB_LIMIT);
1607 kmem_free(ha->outstanding_cmds,
1608 sizeof (*ha->outstanding_cmds) *
1609 MAX_OUTSTANDING_COMMANDS);
1611 if (ha->devpath != NULL) {
1612 kmem_free(ha->devpath,
1613 strlen(ha->devpath) + 1);
1616 kmem_free(ha->dev, sizeof (*ha->dev) *
1617 DEVICE_HEAD_LIST_SIZE);
1619 if (ha->xioctl != NULL) {
1620 ql_free_xioctl_resource(ha);
1623 if (ha->fw_module != NULL) {
1624 (void) ddi_modclose(ha->fw_module);
1626 (void) ql_el_trace_desc_dtor(ha);
1627 (void) ql_nvram_cache_desc_dtor(ha);
1629 ddi_soft_state_free(ql_state, instance);
1630 progress &= ~QL_SOFT_STATE_ALLOCED;
1633 ddi_prop_remove_all(dip);
1634 rval = DDI_FAILURE;
1635 break;
1637 case DDI_RESUME:
1638 rval = DDI_FAILURE;
1640 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
1641 if (ha == NULL) {
1642 cmn_err(CE_WARN, "%s(%d): can't get soft state",
1643 QL_NAME, instance);
1644 break;
1647 ha->power_level = PM_LEVEL_D3;
1648 if (ha->pm_capable) {
1650 * Get ql_power to do power on initialization
1652 if (pm_raise_power(dip, QL_POWER_COMPONENT,
1653 PM_LEVEL_D0) != DDI_SUCCESS) {
1654 cmn_err(CE_WARN, "%s(%d): can't raise adapter"
1655 " power", QL_NAME, instance);
1660 * There is a bug in DR that prevents PM framework
1661 * from calling ql_power.
1663 if (ha->power_level == PM_LEVEL_D3) {
1664 ha->power_level = PM_LEVEL_D0;
1666 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
1667 cmn_err(CE_WARN, "%s(%d): can't initialize the"
1668 " adapter", QL_NAME, instance);
1671 /* Wake up task_daemon. */
1672 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG,
1676 /* Acquire global state lock. */
1677 GLOBAL_STATE_LOCK();
1679 /* Restart driver timer. */
1680 if (ql_timer_timeout_id == NULL) {
1681 ql_timer_timeout_id = timeout(ql_timer, NULL,
1682 ql_timer_ticks);
1685 /* Release global state lock. */
1686 GLOBAL_STATE_UNLOCK();
1688 /* Wake up command start routine. */
1689 ADAPTER_STATE_LOCK(ha);
1690 ha->flags &= ~ADAPTER_SUSPENDED;
1691 ADAPTER_STATE_UNLOCK(ha);
1694 * Transport doesn't make FC discovery in polled
1695 * mode; So we need the daemon thread's services
1696 * right here.
1698 (void) callb_generic_cpr(&ha->cprinfo, CB_CODE_CPR_RESUME);
1700 rval = DDI_SUCCESS;
1702 /* Restart IP if it was running. */
1703 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
1704 (void) ql_initialize_ip(ha);
1705 ql_isp_rcvbuf(ha);
1707 break;
1709 default:
1710 cmn_err(CE_WARN, "%s(%d): attach, unknown code:"
1711 " %x", QL_NAME, ddi_get_instance(dip), cmd);
1712 rval = DDI_FAILURE;
1713 break;
1716 kmem_free(buf, MAXPATHLEN);
1718 if (rval != DDI_SUCCESS) {
1719 /*EMPTY*/
1720 QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
1721 ddi_get_instance(dip), rval);
1722 } else {
1723 /*EMPTY*/
1724 QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
1727 return (rval);
1731 * ql_detach
1732 * Used to remove all the states associated with a given
1733 * instances of a device node prior to the removal of that
1734 * instance from the system.
1736 * Input:
1737 * dip = pointer to device information structure.
1738 * cmd = type of detach.
1740 * Returns:
1741 * DDI_SUCCESS or DDI_FAILURE.
1743 * Context:
1744 * Kernel context.
static int
ql_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	ql_adapter_state_t	*ha, *vha;
	ql_tgt_t		*tq;
	int			delay_cnt;
	uint16_t		index;
	ql_link_t		*link;
	char			*buf;
	timeout_id_t		timer_id = NULL;
	int			suspend, rval = DDI_SUCCESS;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started, cmd=%xh\n", ha->instance, cmd);

	/*
	 * NOTE(review): buf is allocated here and freed at the bottom but
	 * does not appear to be used anywhere in this function — candidate
	 * for removal; confirm against the full file before deleting.
	 */
	buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));

	switch (cmd) {
	case DDI_DETACH:
		/*
		 * Full teardown: resources are released in roughly the
		 * reverse order of their acquisition in ql_attach().
		 */
		ADAPTER_STATE_LOCK(ha);
		ha->flags |= (ADAPTER_SUSPENDED | ABORT_CMDS_LOOP_DOWN_TMO);
		ADAPTER_STATE_UNLOCK(ha);

		TASK_DAEMON_LOCK(ha);

		if (ha->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) {
			ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
			cv_signal(&ha->cv_task_daemon);
		}

		TASK_DAEMON_UNLOCK(ha);

		(void) ql_wait_for_td_stop(ha);

		/*
		 * If the STOP flag is still set, the daemon never
		 * acknowledged the stop request; clear it and log.
		 */
		TASK_DAEMON_LOCK(ha);
		if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
			ha->task_daemon_flags &= ~TASK_DAEMON_STOP_FLG;
			EL(ha, "failed, could not stop task daemon\n");
		}
		TASK_DAEMON_UNLOCK(ha);

		GLOBAL_STATE_LOCK();

		/* Disable driver timer if no adapters. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		ql_remove_link(&ql_hba, &ha->hba);

		GLOBAL_STATE_UNLOCK();

		/* untimeout() may block; must be called without locks held */
		if (timer_id) {
			(void) untimeout(timer_id);
		}

		if (ha->pm_capable) {
			if (pm_lower_power(dip, QL_POWER_COMPONENT,
			    PM_LEVEL_D3) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "%s(%d): failed to lower the"
				    " power", QL_NAME, ha->instance);
			}
		}

		/*
		 * If pm_lower_power shutdown the adapter, there
		 * isn't much else to do
		 */
		if (ha->power_level != PM_LEVEL_D3) {
			ql_halt(ha, PM_LEVEL_D3);
		}

		/* Remove virtual ports. */
		while ((vha = ha->vp_next) != NULL) {
			ql_vport_destroy(vha);
		}

		/* Free target queues. */
		for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
			link = ha->dev[index].first;
			while (link != NULL) {
				tq = link->base_address;
				link = link->next;
				ql_dev_free(ha, tq);
			}
		}

		/*
		 * Free unsolicited buffers.
		 * If we are here then there are no ULPs still
		 * alive that wish to talk to ql so free up
		 * any SRB_IP_UB_UNUSED buffers that are
		 * lingering around
		 */
		QL_UB_LOCK(ha);
		for (index = 0; index < QL_UB_LIMIT; index++) {
			fc_unsol_buf_t *ubp = ha->ub_array[index];

			if (ubp != NULL) {
				ql_srb_t *sp = ubp->ub_fca_private;

				sp->flags |= SRB_UB_FREE_REQUESTED;

				/*
				 * Poll until the buffer is back in the FCA
				 * and not in callback/acquired state; the
				 * lock is dropped around the delay so the
				 * ISR/daemon can return the buffer.
				 */
				while (!(sp->flags & SRB_UB_IN_FCA) ||
				    (sp->flags & (SRB_UB_CALLBACK |
				    SRB_UB_ACQUIRED))) {
					QL_UB_UNLOCK(ha);
					delay(drv_usectohz(100000));
					QL_UB_LOCK(ha);
				}
				ha->ub_array[index] = NULL;

				QL_UB_UNLOCK(ha);
				ql_free_unsolicited_buffer(ha, ubp);
				QL_UB_LOCK(ha);
			}
		}
		QL_UB_UNLOCK(ha);

		/* Free any saved RISC code. */
		if (ha->risc_code != NULL) {
			kmem_free(ha->risc_code, ha->risc_code_size);
			ha->risc_code = NULL;
			ha->risc_code_size = 0;
		}

		if (ha->fw_module != NULL) {
			(void) ddi_modclose(ha->fw_module);
			ha->fw_module = NULL;
		}

		/* Free resources. */
		ddi_prop_remove_all(dip);
		(void) fc_fca_detach(dip);
		kmem_free(ha->tran, sizeof (fc_fca_tran_t));
		ddi_remove_minor_node(dip, "devctl");
		if (ha->k_stats != NULL) {
			kstat_delete(ha->k_stats);
		}

		/* Unmap register spaces; layout differs for SBUS vs PCI. */
		if (CFG_IST(ha, CFG_SBUS_CARD)) {
			ddi_regs_map_free(&ha->sbus_config_handle);
		} else {
			if (CFG_IST(ha, CFG_CTRL_8021)) {
				ql_8021_clr_drv_active(ha);
				ddi_regs_map_free(&ha->db_dev_handle);
			}
			if (ha->iomap_dev_handle != ha->dev_handle) {
				ddi_regs_map_free(&ha->iomap_dev_handle);
			}
			pci_config_teardown(&ha->pci_handle);
		}

		ql_disable_intr(ha);
		ql_release_intr(ha);

		ql_free_xioctl_resource(ha);

		ql_destroy_mutex(ha);

		ql_free_phys(ha, &ha->hba_buf);
		ql_free_phys(ha, &ha->fwexttracebuf);
		ql_free_phys(ha, &ha->fwfcetracebuf);

		ddi_regs_map_free(&ha->dev_handle);
		if (ha->sbus_fpga_iobase != NULL) {
			ddi_regs_map_free(&ha->sbus_fpga_dev_handle);
		}

		ql_fcache_rel(ha->fcache);
		if (ha->vcache != NULL) {
			kmem_free(ha->vcache, QL_24XX_VPD_SIZE);
		}

		if (ha->pi_attrs != NULL) {
			kmem_free(ha->pi_attrs, sizeof (fca_port_attrs_t));
		}

		kmem_free(ha->adapter_stats, sizeof (*ha->adapter_stats));

		kmem_free(ha->ub_array, sizeof (*ha->ub_array) * QL_UB_LIMIT);

		kmem_free(ha->outstanding_cmds,
		    sizeof (*ha->outstanding_cmds) * MAX_OUTSTANDING_COMMANDS);

		if (ha->n_port != NULL) {
			kmem_free(ha->n_port, sizeof (ql_n_port_info_t));
		}

		if (ha->devpath != NULL) {
			kmem_free(ha->devpath, strlen(ha->devpath) + 1);
		}

		kmem_free(ha->dev, sizeof (*ha->dev) * DEVICE_HEAD_LIST_SIZE);

		EL(ha, "detached\n");

		ddi_soft_state_free(ql_state, (int)ha->instance);

		break;

	case DDI_SUSPEND:
		ADAPTER_STATE_LOCK(ha);

		/*
		 * Wait (up to ~10 iterations) for the one-second timer to
		 * drain before declaring the adapter suspended.
		 */
		delay_cnt = 0;
		ha->flags |= ADAPTER_SUSPENDED;
		while (ha->flags & ADAPTER_TIMER_BUSY && delay_cnt++ < 10) {
			ADAPTER_STATE_UNLOCK(ha);
			ddi_sleep(1);
			ADAPTER_STATE_LOCK(ha);
		}
		if (ha->busy || ha->flags & ADAPTER_TIMER_BUSY) {
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			rval = DDI_FAILURE;
			cmn_err(CE_WARN, "!%s(%d): Fail suspend"
			    " busy %xh flags %xh", QL_NAME, ha->instance,
			    ha->busy, ha->flags);
			break;
		}

		ADAPTER_STATE_UNLOCK(ha);

		if (ha->flags & IP_INITIALIZED) {
			(void) ql_shutdown_ip(ha);
		}

		if ((suspend = ql_suspend_adapter(ha)) != QL_SUCCESS) {
			ADAPTER_STATE_LOCK(ha);
			ha->flags &= ~ADAPTER_SUSPENDED;
			ADAPTER_STATE_UNLOCK(ha);
			cmn_err(CE_WARN, "%s(%d): Fail suspend rval %xh",
			    QL_NAME, ha->instance, suspend);

			/* Restart IP if it was running. */
			if (ha->flags & IP_ENABLED &&
			    !(ha->flags & IP_INITIALIZED)) {
				(void) ql_initialize_ip(ha);
				ql_isp_rcvbuf(ha);
			}
			rval = DDI_FAILURE;
			break;
		}

		/* Acquire global state lock. */
		GLOBAL_STATE_LOCK();

		/* Disable driver timer if last adapter. */
		if (ql_timer_timeout_id && ql_hba.first == &ha->hba &&
		    ql_hba.last == &ha->hba) {
			timer_id = ql_timer_timeout_id;
			ql_timer_timeout_id = NULL;
		}
		GLOBAL_STATE_UNLOCK();

		if (timer_id) {
			(void) untimeout(timer_id);
		}

		EL(ha, "suspended\n");

		break;

	default:
		rval = DDI_FAILURE;
		break;
	}

	kmem_free(buf, MAXPATHLEN);

	if (rval != DDI_SUCCESS) {
		if (ha != NULL) {
			EL(ha, "failed, rval = %xh\n", rval);
		} else {
			/*EMPTY*/
			QL_PRINT_2(CE_CONT, "(%d): failed, rval = %xh\n",
			    ddi_get_instance(dip), rval);
		}
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ddi_get_instance(dip));
	}

	return (rval);
}
2041 * ql_power
2042 * Power a device attached to the system.
2044 * Input:
2045 * dip = pointer to device information structure.
2046 * component = device.
2047 * level = power level.
2049 * Returns:
2050 * DDI_SUCCESS or DDI_FAILURE.
2052 * Context:
2053 * Kernel context.
2055 /* ARGSUSED */
2056 static int
2057 ql_power(dev_info_t *dip, int component, int level)
2059 int rval = DDI_FAILURE;
2060 off_t csr;
2061 uint8_t saved_pm_val;
2062 ql_adapter_state_t *ha;
2063 char *buf;
2064 char *path;
2066 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2067 if (ha == NULL || ha->pm_capable == 0) {
2068 QL_PRINT_2(CE_CONT, "(%d): no hba or PM not supported\n",
2069 ddi_get_instance(dip));
2070 return (rval);
2073 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2075 buf = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2076 path = (char *)(kmem_zalloc(MAXPATHLEN, KM_SLEEP));
2078 if (component != QL_POWER_COMPONENT || (level != PM_LEVEL_D0 &&
2079 level != PM_LEVEL_D3)) {
2080 EL(ha, "invalid, component=%xh or level=%xh\n",
2081 component, level);
2082 return (rval);
2085 GLOBAL_HW_LOCK();
2086 csr = (uint8_t)ql_pci_config_get8(ha, PCI_CONF_CAP_PTR) + PCI_PMCSR;
2087 GLOBAL_HW_UNLOCK();
2089 (void) snprintf(buf, sizeof (buf),
2090 "Qlogic %s(%d): %s\n\t", QL_NAME, ddi_get_instance(dip),
2091 ddi_pathname(dip, path));
2093 switch (level) {
2094 case PM_LEVEL_D0: /* power up to D0 state - fully on */
2096 QL_PM_LOCK(ha);
2097 if (ha->power_level == PM_LEVEL_D0) {
2098 QL_PM_UNLOCK(ha);
2099 rval = DDI_SUCCESS;
2100 break;
2104 * Enable interrupts now
2106 saved_pm_val = ha->power_level;
2107 ha->power_level = PM_LEVEL_D0;
2108 QL_PM_UNLOCK(ha);
2110 GLOBAL_HW_LOCK();
2112 ql_pci_config_put16(ha, csr, PCI_PMCSR_D0);
2115 * Delay after reset, for chip to recover.
2116 * Otherwise causes system PANIC
2118 drv_usecwait(200000);
2120 GLOBAL_HW_UNLOCK();
2122 if (ha->config_saved) {
2123 ha->config_saved = 0;
2124 if (QL_RESTORE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2125 QL_PM_LOCK(ha);
2126 ha->power_level = saved_pm_val;
2127 QL_PM_UNLOCK(ha);
2128 cmn_err(CE_WARN, "%s failed to restore "
2129 "config regs", buf);
2130 break;
2134 if (ql_initialize_adapter(ha) != QL_SUCCESS) {
2135 cmn_err(CE_WARN, "%s adapter initialization failed",
2136 buf);
2139 /* Wake up task_daemon. */
2140 ql_awaken_task_daemon(ha, NULL, TASK_DAEMON_ALIVE_FLG |
2141 TASK_DAEMON_SLEEPING_FLG, 0);
2143 /* Restart IP if it was running. */
2144 if (ha->flags & IP_ENABLED && !(ha->flags & IP_INITIALIZED)) {
2145 (void) ql_initialize_ip(ha);
2146 ql_isp_rcvbuf(ha);
2149 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered ON\n",
2150 ha->instance, QL_NAME);
2152 rval = DDI_SUCCESS;
2153 break;
2155 case PM_LEVEL_D3: /* power down to D3 state - off */
2157 QL_PM_LOCK(ha);
2159 if (ha->busy || ((ha->task_daemon_flags &
2160 TASK_DAEMON_SLEEPING_FLG) == 0)) {
2161 QL_PM_UNLOCK(ha);
2162 break;
2165 if (ha->power_level == PM_LEVEL_D3) {
2166 rval = DDI_SUCCESS;
2167 QL_PM_UNLOCK(ha);
2168 break;
2170 QL_PM_UNLOCK(ha);
2172 if (QL_SAVE_CONFIG_REGS(dip) != DDI_SUCCESS) {
2173 cmn_err(CE_WARN, "!Qlogic %s(%d): %s failed to save"
2174 " config regs", QL_NAME, ha->instance, buf);
2175 break;
2177 ha->config_saved = 1;
2180 * Don't enable interrupts. Running mailbox commands with
2181 * interrupts enabled could cause hangs since pm_run_scan()
2182 * runs out of a callout thread and on single cpu systems
2183 * cv_reltimedwait_sig(), called from ql_mailbox_command(),
2184 * would not get to run.
2186 TASK_DAEMON_LOCK(ha);
2187 ha->task_daemon_flags |= TASK_DAEMON_POWERING_DOWN;
2188 TASK_DAEMON_UNLOCK(ha);
2190 ql_halt(ha, PM_LEVEL_D3);
2193 * Setup ql_intr to ignore interrupts from here on.
2195 QL_PM_LOCK(ha);
2196 ha->power_level = PM_LEVEL_D3;
2197 QL_PM_UNLOCK(ha);
2200 * Wait for ISR to complete.
2202 INTR_LOCK(ha);
2203 ql_pci_config_put16(ha, csr, PCI_PMCSR_D3HOT);
2204 INTR_UNLOCK(ha);
2206 cmn_err(CE_NOTE, QL_BANG "ql_power(%d): %s is powered OFF\n",
2207 ha->instance, QL_NAME);
2209 rval = DDI_SUCCESS;
2210 break;
2213 kmem_free(buf, MAXPATHLEN);
2214 kmem_free(path, MAXPATHLEN);
2216 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);
2218 return (rval);
2222 * ql_quiesce
2223 * quiesce a device attached to the system.
2225 * Input:
2226 * dip = pointer to device information structure.
2228 * Returns:
2229 * DDI_SUCCESS
2231 * Context:
2232 * Kernel context.
static int
ql_quiesce(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint32_t		timer;
	uint32_t		stat;

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		/* Oh well.... */
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_SUCCESS);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* 8021-type adapters: stop firmware via mailbox command. */
		(void) ql_stop_firmware(ha);
	} else if (CFG_IST(ha, CFG_CTRL_242581)) {
		/*
		 * 24xx/25xx/81xx: hand MBC_STOP_FIRMWARE directly to the
		 * RISC through the inbound mailbox, then ring the host
		 * interrupt doorbell.
		 */
		WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
		WRT16_IO_REG(ha, mailbox_in[0], MBC_STOP_FIRMWARE);
		WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		/*
		 * Poll for completion: 30000 iterations x 100us is a
		 * ~3 second budget.  BIT_15 in risc2host presumably
		 * signals a RISC-to-host event -- TODO confirm against
		 * the ISP24xx register spec; event codes below 0x12
		 * appear to be treated as mailbox completion.
		 */
		for (timer = 0; timer < 30000; timer++) {
			stat = RD32_IO_REG(ha, risc2host);
			if (stat & BIT_15) {
				if ((stat & 0xff) < 0x12) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					break;
				}
				WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			}
			drv_usecwait(100);
		}
		/* Reset the chip. */
		WRT32_IO_REG(ha, ctrl_status, ISP_RESET | DMA_SHUTDOWN |
		    MWB_4096_BYTES);
		/* Delay after reset, for chip to recover. */
		drv_usecwait(100);

	} else {
		/* Legacy ISP2xxx path: quiesce via direct register pokes. */
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
		/* Select RISC module registers. */
		WRT16_IO_REG(ha, ctrl_status, 0);
		/* Reset ISP semaphore. */
		WRT16_IO_REG(ha, semaphore, 0);
		/* Reset RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
		/* Release RISC module. */
		WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
	}

	ql_disable_intr(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	/* quiesce(9E) must not fail; always report success. */
	return (DDI_SUCCESS);
}
2294 /* ************************************************************************ */
2295 /* Fibre Channel Adapter (FCA) Transport Functions. */
2296 /* ************************************************************************ */
2299 * ql_bind_port
2300 * Handling port binding. The FC Transport attempts to bind an FCA port
2301 * when it is ready to start transactions on the port. The FC Transport
2302 * will call the fca_bind_port() function specified in the fca_transport
2303 * structure it receives. The FCA must fill in the port_info structure
2304 * passed in the call and also stash the information for future calls.
2306 * Input:
2307 * dip = pointer to FCA information structure.
2308 * port_info = pointer to port information structure.
2309 * bind_info = pointer to bind information structure.
2311 * Returns:
2312 * NULL = failure
2314 * Context:
2315 * Kernel context.
2317 static opaque_t
2318 ql_bind_port(dev_info_t *dip, fc_fca_port_info_t *port_info,
2319 fc_fca_bind_info_t *bind_info)
2321 ql_adapter_state_t *ha, *vha;
2322 opaque_t fca_handle = NULL;
2323 port_id_t d_id;
2324 int port_npiv = bind_info->port_npiv;
2325 uchar_t *port_nwwn = bind_info->port_nwwn.raw_wwn;
2326 uchar_t *port_pwwn = bind_info->port_pwwn.raw_wwn;
2328 /* get state info based on the dip */
2329 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
2330 if (ha == NULL) {
2331 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
2332 ddi_get_instance(dip));
2333 return (NULL);
2335 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
2337 /* Verify port number is supported. */
2338 if (port_npiv != 0) {
2339 if (!(ha->flags & VP_ENABLED)) {
2340 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_NOT_SUPPORTED\n",
2341 ha->instance);
2342 port_info->pi_error = FC_NPIV_NOT_SUPPORTED;
2343 return (NULL);
2345 if (!(ha->flags & POINT_TO_POINT)) {
2346 QL_PRINT_2(CE_CONT, "(%d): FC_NPIV_WRONG_TOPOLOGY\n",
2347 ha->instance);
2348 port_info->pi_error = FC_NPIV_WRONG_TOPOLOGY;
2349 return (NULL);
2351 if (!(ha->flags & FDISC_ENABLED)) {
2352 QL_PRINT_2(CE_CONT, "(%d): switch does not support "
2353 "FDISC\n", ha->instance);
2354 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2355 return (NULL);
2357 if (bind_info->port_num > (CFG_IST(ha, CFG_CTRL_2422) ?
2358 MAX_24_VIRTUAL_PORTS : MAX_25_VIRTUAL_PORTS)) {
2359 QL_PRINT_2(CE_CONT, "(%d): port number=%d "
2360 "FC_OUTOFBOUNDS\n", ha->instance);
2361 port_info->pi_error = FC_OUTOFBOUNDS;
2362 return (NULL);
2364 } else if (bind_info->port_num != 0) {
2365 QL_PRINT_2(CE_CONT, "(%d): failed, port number=%d is not "
2366 "supported\n", ha->instance, bind_info->port_num);
2367 port_info->pi_error = FC_OUTOFBOUNDS;
2368 return (NULL);
2371 /* Locate port context. */
2372 for (vha = ha; vha != NULL; vha = vha->vp_next) {
2373 if (vha->vp_index == bind_info->port_num) {
2374 break;
2378 /* If virtual port does not exist. */
2379 if (vha == NULL) {
2380 vha = ql_vport_create(ha, (uint8_t)bind_info->port_num);
2383 /* make sure this port isn't already bound */
2384 if (vha->flags & FCA_BOUND) {
2385 port_info->pi_error = FC_ALREADY;
2386 } else {
2387 if (vha->vp_index != 0) {
2388 bcopy(port_nwwn,
2389 vha->loginparams.node_ww_name.raw_wwn, 8);
2390 bcopy(port_pwwn,
2391 vha->loginparams.nport_ww_name.raw_wwn, 8);
2393 if (vha->vp_index != 0 && !(vha->flags & VP_ENABLED)) {
2394 if (ql_vport_enable(vha) != QL_SUCCESS) {
2395 QL_PRINT_2(CE_CONT, "(%d): failed to enable "
2396 "virtual port=%d\n", ha->instance,
2397 vha->vp_index);
2398 port_info->pi_error = FC_NPIV_FDISC_FAILED;
2399 return (NULL);
2401 cmn_err(CE_CONT, "!Qlogic %s(%d) NPIV(%d) "
2402 "WWPN=%02x%02x%02x%02x%02x%02x%02x%02x : "
2403 "WWNN=%02x%02x%02x%02x%02x%02x%02x%02x\n",
2404 QL_NAME, ha->instance, vha->vp_index,
2405 port_pwwn[0], port_pwwn[1], port_pwwn[2],
2406 port_pwwn[3], port_pwwn[4], port_pwwn[5],
2407 port_pwwn[6], port_pwwn[7],
2408 port_nwwn[0], port_nwwn[1], port_nwwn[2],
2409 port_nwwn[3], port_nwwn[4], port_nwwn[5],
2410 port_nwwn[6], port_nwwn[7]);
2413 /* stash the bind_info supplied by the FC Transport */
2414 vha->bind_info.port_handle = bind_info->port_handle;
2415 vha->bind_info.port_statec_cb =
2416 bind_info->port_statec_cb;
2417 vha->bind_info.port_unsol_cb = bind_info->port_unsol_cb;
2419 /* Set port's source ID. */
2420 port_info->pi_s_id.port_id = vha->d_id.b24;
2422 /* copy out the default login parameters */
2423 bcopy((void *)&vha->loginparams,
2424 (void *)&port_info->pi_login_params,
2425 sizeof (la_els_logi_t));
2427 /* Set port's hard address if enabled. */
2428 port_info->pi_hard_addr.hard_addr = 0;
2429 if (bind_info->port_num == 0) {
2430 d_id.b24 = ha->d_id.b24;
2431 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2432 if (ha->init_ctrl_blk.cb24.
2433 firmware_options_1[0] & BIT_0) {
2434 d_id.b.al_pa = ql_index_to_alpa[ha->
2435 init_ctrl_blk.cb24.
2436 hard_address[0]];
2437 port_info->pi_hard_addr.hard_addr =
2438 d_id.b24;
2440 } else if (ha->init_ctrl_blk.cb.firmware_options[0] &
2441 BIT_0) {
2442 d_id.b.al_pa = ql_index_to_alpa[ha->
2443 init_ctrl_blk.cb.hard_address[0]];
2444 port_info->pi_hard_addr.hard_addr = d_id.b24;
2447 /* Set the node id data */
2448 if (ql_get_rnid_params(ha,
2449 sizeof (port_info->pi_rnid_params.params),
2450 (caddr_t)&port_info->pi_rnid_params.params) ==
2451 QL_SUCCESS) {
2452 port_info->pi_rnid_params.status = FC_SUCCESS;
2453 } else {
2454 port_info->pi_rnid_params.status = FC_FAILURE;
2457 /* Populate T11 FC-HBA details */
2458 ql_populate_hba_fru_details(ha, port_info);
2459 ha->pi_attrs = kmem_zalloc(sizeof (fca_port_attrs_t),
2460 KM_SLEEP);
2461 if (ha->pi_attrs != NULL) {
2462 bcopy(&port_info->pi_attrs, ha->pi_attrs,
2463 sizeof (fca_port_attrs_t));
2465 } else {
2466 port_info->pi_rnid_params.status = FC_FAILURE;
2467 if (ha->pi_attrs != NULL) {
2468 bcopy(ha->pi_attrs, &port_info->pi_attrs,
2469 sizeof (fca_port_attrs_t));
2473 /* Generate handle for this FCA. */
2474 fca_handle = (opaque_t)vha;
2476 ADAPTER_STATE_LOCK(ha);
2477 vha->flags |= FCA_BOUND;
2478 ADAPTER_STATE_UNLOCK(ha);
2479 /* Set port's current state. */
2480 port_info->pi_port_state = vha->state;
2483 QL_PRINT_10(CE_CONT, "(%d,%d): done, pi_port_state=%xh, "
2484 "pi_s_id.port_id=%xh\n", ha->instance, ha->vp_index,
2485 port_info->pi_port_state, port_info->pi_s_id.port_id);
2487 return (fca_handle);
2491 * ql_unbind_port
2492 * To unbind a Fibre Channel Adapter from an FC Port driver.
2494 * Input:
2495 * fca_handle = handle setup by ql_bind_port().
2497 * Context:
2498 * Kernel context.
2500 static void
2501 ql_unbind_port(opaque_t fca_handle)
2503 ql_adapter_state_t *ha;
2504 ql_tgt_t *tq;
2505 uint32_t flgs;
2507 ha = ql_fca_handle_to_state(fca_handle);
2508 if (ha == NULL) {
2509 /*EMPTY*/
2510 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2511 (void *)fca_handle);
2512 } else {
2513 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance,
2514 ha->vp_index);
2516 if (!(ha->flags & FCA_BOUND)) {
2517 /*EMPTY*/
2518 QL_PRINT_2(CE_CONT, "(%d): port=%d already unbound\n",
2519 ha->instance, ha->vp_index);
2520 } else {
2521 if (ha->vp_index != 0 && ha->flags & VP_ENABLED) {
2522 if ((tq = ql_loop_id_to_queue(ha,
2523 FL_PORT_24XX_HDL)) != NULL) {
2524 (void) ql_logout_fabric_port(ha, tq);
2526 (void) ql_vport_control(ha, (uint8_t)
2527 (CFG_IST(ha, CFG_CTRL_2425) ?
2528 VPC_DISABLE_INIT : VPC_DISABLE_LOGOUT));
2529 flgs = FCA_BOUND | VP_ENABLED;
2530 } else {
2531 flgs = FCA_BOUND;
2533 ADAPTER_STATE_LOCK(ha);
2534 ha->flags &= ~flgs;
2535 ADAPTER_STATE_UNLOCK(ha);
2538 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
2539 ha->vp_index);
2544 * ql_init_pkt
2545 * Initialize FCA portion of packet.
2547 * Input:
2548 * fca_handle = handle setup by ql_bind_port().
2549 * pkt = pointer to fc_packet.
2551 * Returns:
2552 * FC_SUCCESS - the packet has successfully been initialized.
2553 * FC_UNBOUND - the fca_handle specified is not bound.
2554 * FC_NOMEM - the FCA failed initialization due to an allocation error.
2555 * FC_FAILURE - the FCA failed initialization for undisclosed reasons
2557 * Context:
2558 * Kernel context.
2560 /* ARGSUSED */
2561 static int
2562 ql_init_pkt(opaque_t fca_handle, fc_packet_t *pkt, int sleep)
2564 ql_adapter_state_t *ha;
2565 ql_srb_t *sp;
2566 int rval = FC_SUCCESS;
2568 ha = ql_fca_handle_to_state(fca_handle);
2569 if (ha == NULL) {
2570 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2571 (void *)fca_handle);
2572 return (FC_UNBOUND);
2574 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2576 sp = (ql_srb_t *)pkt->pkt_fca_private;
2577 sp->flags = 0;
2579 /* init cmd links */
2580 sp->cmd.base_address = sp;
2581 sp->cmd.prev = NULL;
2582 sp->cmd.next = NULL;
2583 sp->cmd.head = NULL;
2585 /* init watchdog links */
2586 sp->wdg.base_address = sp;
2587 sp->wdg.prev = NULL;
2588 sp->wdg.next = NULL;
2589 sp->wdg.head = NULL;
2590 sp->pkt = pkt;
2591 sp->ha = ha;
2592 sp->magic_number = QL_FCA_BRAND;
2593 sp->sg_dma.dma_handle = NULL;
2594 if (CFG_IST(ha, CFG_CTRL_8021)) {
2595 /* Setup DMA for scatter gather list. */
2596 sp->sg_dma.size = sizeof (cmd6_2400_dma_t);
2597 sp->sg_dma.type = LITTLE_ENDIAN_DMA;
2598 sp->sg_dma.cookie_count = 1;
2599 sp->sg_dma.alignment = 64;
2600 if (ql_alloc_phys(ha, &sp->sg_dma, KM_SLEEP) != QL_SUCCESS) {
2601 rval = FC_NOMEM;
2605 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2607 return (rval);
2611 * ql_un_init_pkt
2612 * Release all local resources bound to packet.
2614 * Input:
2615 * fca_handle = handle setup by ql_bind_port().
2616 * pkt = pointer to fc_packet.
2618 * Returns:
2619 * FC_SUCCESS - the packet has successfully been invalidated.
2620 * FC_UNBOUND - the fca_handle specified is not bound.
2621 * FC_BADPACKET - the packet has not been initialized or has
2622 * already been freed by this FCA.
2624 * Context:
2625 * Kernel context.
2627 static int
2628 ql_un_init_pkt(opaque_t fca_handle, fc_packet_t *pkt)
2630 ql_adapter_state_t *ha;
2631 int rval;
2632 ql_srb_t *sp;
2634 ha = ql_fca_handle_to_state(fca_handle);
2635 if (ha == NULL) {
2636 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2637 (void *)fca_handle);
2638 return (FC_UNBOUND);
2640 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2642 sp = (ql_srb_t *)pkt->pkt_fca_private;
2644 if (sp->magic_number != QL_FCA_BRAND) {
2645 EL(ha, "failed, FC_BADPACKET\n");
2646 rval = FC_BADPACKET;
2647 } else {
2648 sp->magic_number = 0;
2649 ql_free_phys(ha, &sp->sg_dma);
2650 rval = FC_SUCCESS;
2653 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2655 return (rval);
2659 * ql_els_send
2660 * Issue a extended link service request.
2662 * Input:
2663 * fca_handle = handle setup by ql_bind_port().
2664 * pkt = pointer to fc_packet.
2666 * Returns:
2667 * FC_SUCCESS - the command was successful.
2668 * FC_ELS_FREJECT - the command was rejected by a Fabric.
2669 * FC_ELS_PREJECT - the command was rejected by an N-port.
2670 * FC_TRANSPORT_ERROR - a transport error occurred.
2671 * FC_UNBOUND - the fca_handle specified is not bound.
2672 * FC_ELS_BAD - the FCA can not issue the requested ELS.
2674 * Context:
2675 * Kernel context.
static int
ql_els_send(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ls_code_t		els;
	la_els_rjt_t		rjt;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (FC_INVALID_REQUEST);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			pkt->pkt_state = FC_PKT_TRAN_BSY;
			pkt->pkt_reason = FC_REASON_XCHG_BSY;

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed=%xh\n",
			    QL_FUNCTION_TIMEOUT);
			return (FC_TRAN_BUSY);
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/*
	 * Setup response header: start from a copy of the command header,
	 * then overwrite the fields that differ for a response frame.
	 */
	bcopy((void *)&pkt->pkt_cmd_fhdr, (void *)&pkt->pkt_resp_fhdr,
	    sizeof (fc_frame_hdr_t));

	if (pkt->pkt_rsplen) {
		bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
	}

	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_EXTENDED_SVC |
	    R_CTL_SOLICITED_CONTROL;
	pkt->pkt_resp_fhdr.f_ctl = F_CTL_XCHG_CONTEXT | F_CTL_LAST_SEQ |
	    F_CTL_END_SEQ;

	/* Clear any stale packet-type flags, then mark this SRB as ELS. */
	sp->flags &= ~(SRB_UB_CALLBACK | SRB_UB_RSCN | SRB_UB_FCP |
	    SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT | SRB_FCP_RSP_PKT |
	    SRB_IP_PKT | SRB_COMMAND_TIMEOUT | SRB_UB_ACQUIRED | SRB_MS_PKT);

	sp->flags |= SRB_ELS_PKT;

	/* map the type of ELS to a function */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

#if 0
	QL_PRINT_3(CE_CONT, "(%d): command fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&els, 32, sizeof (els) / 4);
#endif

	sp->iocb = ha->els_cmd;
	sp->req_cnt = 1;

	/* Dispatch to the per-ELS handler on the command's ls_code. */
	switch (els.ls_code) {
	case LA_ELS_RJT:
	case LA_ELS_ACC:
		EL(ha, "LA_ELS_RJT\n");
		pkt->pkt_state = FC_PKT_SUCCESS;
		rval = FC_SUCCESS;
		break;
	case LA_ELS_PLOGI:
	case LA_ELS_PDISC:
		rval = ql_els_plogi(ha, pkt);
		break;
	case LA_ELS_FLOGI:
	case LA_ELS_FDISC:
		rval = ql_els_flogi(ha, pkt);
		break;
	case LA_ELS_LOGO:
		rval = ql_els_logo(ha, pkt);
		break;
	case LA_ELS_PRLI:
		rval = ql_els_prli(ha, pkt);
		break;
	case LA_ELS_PRLO:
		rval = ql_els_prlo(ha, pkt);
		break;
	case LA_ELS_ADISC:
		rval = ql_els_adisc(ha, pkt);
		break;
	case LA_ELS_LINIT:
		rval = ql_els_linit(ha, pkt);
		break;
	case LA_ELS_LPC:
		rval = ql_els_lpc(ha, pkt);
		break;
	case LA_ELS_LSTS:
		rval = ql_els_lsts(ha, pkt);
		break;
	case LA_ELS_SCR:
		rval = ql_els_scr(ha, pkt);
		break;
	case LA_ELS_RSCN:
		rval = ql_els_rscn(ha, pkt);
		break;
	case LA_ELS_FARP_REQ:
		rval = ql_els_farp_req(ha, pkt);
		break;
	case LA_ELS_FARP_REPLY:
		rval = ql_els_farp_reply(ha, pkt);
		break;
	case LA_ELS_RLS:
		rval = ql_els_rls(ha, pkt);
		break;
	case LA_ELS_RNID:
		rval = ql_els_rnid(ha, pkt);
		break;
	default:
		/* Unsupported ELS: synthesize an LS_RJT response locally. */
		EL(ha, "LA_ELS_RJT, FC_REASON_CMD_UNSUPPORTED=%xh\n",
		    els.ls_code);
		/* Build RJT. */
		bzero(&rjt, sizeof (rjt));
		rjt.ls_code.ls_code = LA_ELS_RJT;
		rjt.reason = FC_REASON_CMD_UNSUPPORTED;

		ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
		    (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);

		pkt->pkt_state = FC_PKT_LOCAL_RJT;
		pkt->pkt_reason = FC_REASON_UNSUPPORTED;
		rval = FC_SUCCESS;
		break;
	}

#if 0
	QL_PRINT_3(CE_CONT, "(%d): response fhdr:\n", ha->instance);
	QL_DUMP_3((uint8_t *)&pkt->pkt_resp_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
#endif
	/*
	 * Return success if the srb was consumed by an iocb. The packet
	 * completion callback will be invoked by the response handler.
	 */
	if (rval == QL_CONSUMED) {
		rval = FC_SUCCESS;
	} else if (rval == FC_SUCCESS &&
	    !(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
		/* Do command callback only if no error */
		ql_awaken_task_daemon(ha, sp, 0, 0);
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
2857 * ql_get_cap
2858 * Export FCA hardware and software capabilities.
2860 * Input:
2861 * fca_handle = handle setup by ql_bind_port().
2862 * cap = pointer to the capabilities string.
2863 * ptr = buffer pointer for return capability.
2865 * Returns:
2866 * FC_CAP_ERROR - no such capability
2867 * FC_CAP_FOUND - the capability was returned and cannot be set
2868 * FC_CAP_SETTABLE - the capability was returned and can be set
2869 * FC_UNBOUND - the fca_handle specified is not bound.
2871 * Context:
2872 * Kernel context.
2874 static int
2875 ql_get_cap(opaque_t fca_handle, char *cap, void *ptr)
2877 ql_adapter_state_t *ha;
2878 int rval;
2879 uint32_t *rptr = (uint32_t *)ptr;
2881 ha = ql_fca_handle_to_state(fca_handle);
2882 if (ha == NULL) {
2883 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2884 (void *)fca_handle);
2885 return (FC_UNBOUND);
2887 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2889 if (strcmp(cap, FC_NODE_WWN) == 0) {
2890 bcopy((void *)&ha->loginparams.node_ww_name.raw_wwn[0],
2891 ptr, 8);
2892 rval = FC_CAP_FOUND;
2893 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2894 bcopy((void *)&ha->loginparams, ptr,
2895 sizeof (la_els_logi_t));
2896 rval = FC_CAP_FOUND;
2897 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2898 *rptr = (uint32_t)QL_UB_LIMIT;
2899 rval = FC_CAP_FOUND;
2900 } else if (strcmp(cap, FC_CAP_NOSTREAM_ON_UNALIGN_BUF) == 0) {
2902 dev_info_t *psydip = NULL;
2904 if (psydip) {
2905 *rptr = (uint32_t)FC_NO_STREAMING;
2906 EL(ha, "No Streaming\n");
2907 } else {
2908 *rptr = (uint32_t)FC_ALLOW_STREAMING;
2909 EL(ha, "Allow Streaming\n");
2911 rval = FC_CAP_FOUND;
2912 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2913 if (CFG_IST(ha, CFG_CTRL_24258081)) {
2914 *rptr = (uint32_t)CHAR_TO_SHORT(
2915 ha->init_ctrl_blk.cb24.max_frame_length[0],
2916 ha->init_ctrl_blk.cb24.max_frame_length[1]);
2917 } else {
2918 *rptr = (uint32_t)CHAR_TO_SHORT(
2919 ha->init_ctrl_blk.cb.max_frame_length[0],
2920 ha->init_ctrl_blk.cb.max_frame_length[1]);
2922 rval = FC_CAP_FOUND;
2923 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2924 *rptr = FC_RESET_RETURN_ALL;
2925 rval = FC_CAP_FOUND;
2926 } else if (strcmp(cap, FC_CAP_FCP_DMA) == 0) {
2927 *rptr = FC_NO_DVMA_SPACE;
2928 rval = FC_CAP_FOUND;
2929 } else {
2930 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2931 rval = FC_CAP_ERROR;
2934 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2936 return (rval);
2940 * ql_set_cap
2941 * Allow the FC Transport to set FCA capabilities if possible.
2943 * Input:
2944 * fca_handle = handle setup by ql_bind_port().
2945 * cap = pointer to the capabilities string.
2946 * ptr = buffer pointer for capability.
2948 * Returns:
2949 * FC_CAP_ERROR - no such capability
2950 * FC_CAP_FOUND - the capability cannot be set by the FC Transport.
2951 * FC_CAP_SETTABLE - the capability was successfully set.
2952 * FC_UNBOUND - the fca_handle specified is not bound.
2954 * Context:
2955 * Kernel context.
2957 /* ARGSUSED */
2958 static int
2959 ql_set_cap(opaque_t fca_handle, char *cap, void *ptr)
2961 ql_adapter_state_t *ha;
2962 int rval;
2964 ha = ql_fca_handle_to_state(fca_handle);
2965 if (ha == NULL) {
2966 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
2967 (void *)fca_handle);
2968 return (FC_UNBOUND);
2970 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
2972 if (strcmp(cap, FC_NODE_WWN) == 0) {
2973 rval = FC_CAP_FOUND;
2974 } else if (strcmp(cap, FC_LOGIN_PARAMS) == 0) {
2975 rval = FC_CAP_FOUND;
2976 } else if (strcmp(cap, FC_CAP_UNSOL_BUF) == 0) {
2977 rval = FC_CAP_FOUND;
2978 } else if (strcmp(cap, FC_CAP_PAYLOAD_SIZE) == 0) {
2979 rval = FC_CAP_FOUND;
2980 } else if (strcmp(cap, FC_CAP_POST_RESET_BEHAVIOR) == 0) {
2981 rval = FC_CAP_FOUND;
2982 } else {
2983 EL(ha, "unknown=%s, FC_CAP_ERROR\n", cap);
2984 rval = FC_CAP_ERROR;
2987 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
2989 return (rval);
2993 * ql_getmap
2994 * Request of Arbitrated Loop (AL-PA) map.
2996 * Input:
2997 * fca_handle = handle setup by ql_bind_port().
2998 * mapbuf= buffer pointer for map.
3000 * Returns:
3001 * FC_OLDPORT - the specified port is not operating in loop mode.
3002 * FC_OFFLINE - the specified port is not online.
3003 * FC_NOMAP - there is no loop map available for this port.
3004 * FC_UNBOUND - the fca_handle specified is not bound.
3005 * FC_SUCCESS - a valid map has been placed in mapbuf.
3007 * Context:
3008 * Kernel context.
static int
ql_getmap(opaque_t fca_handle, fc_lilpmap_t *mapbuf)
{
	ql_adapter_state_t	*ha;
	clock_t			timer = drv_usectohz(30000000);
	int			rval = FC_SUCCESS;

	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Identify the buffer as a loop map and fill in our own AL_PA. */
	mapbuf->lilp_magic = (uint16_t)MAGIC_LIRP;
	mapbuf->lilp_myalpa = ha->d_id.b.al_pa;

	/* Wait for suspension to end. */
	TASK_DAEMON_LOCK(ha);
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->pha->cv_dr_suspended,
		    &ha->pha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */

			/* Release task daemon lock. */
			TASK_DAEMON_UNLOCK(ha);

			EL(ha, "QL_SUSPENDED failed, FC_TRAN_BUSY\n");
			return (FC_TRAN_BUSY);
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* Fetch the loop position map from the firmware. */
	if (ql_get_loop_position_map(ha, LOOP_POSITION_MAP_SIZE,
	    (caddr_t)&mapbuf->lilp_length) != QL_SUCCESS) {
		/*
		 * Now, since transport drivers consider this as an
		 * offline condition, let's wait for a few seconds
		 * for any loop transitions before we reset the
		 * chip and restart all over again.
		 */
		ql_delay(ha, 2000000);
		EL(ha, "failed, FC_NOMAP\n");
		rval = FC_NOMAP;
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): my_alpa %xh len %xh "
		    "data %xh %xh %xh %xh\n", ha->instance,
		    mapbuf->lilp_myalpa, mapbuf->lilp_length,
		    mapbuf->lilp_alpalist[0], mapbuf->lilp_alpalist[1],
		    mapbuf->lilp_alpalist[2], mapbuf->lilp_alpalist[3]);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)mapbuf, 8, sizeof (fc_lilpmap_t));
#endif
	return (rval);
}
3080 * ql_transport
3081 * Issue an I/O request. Handles all regular requests.
3083 * Input:
3084 * fca_handle = handle setup by ql_bind_port().
3085 * pkt = pointer to fc_packet.
3087 * Returns:
3088 * FC_SUCCESS - the packet was accepted for transport.
3089 * FC_TRANSPORT_ERROR - a transport error occurred.
3090 * FC_BADPACKET - the packet to be transported had not been
3091 * initialized by this FCA.
3092 * FC_UNBOUND - the fca_handle specified is not bound.
3094 * Context:
3095 * Kernel context.
static int
ql_transport(opaque_t fca_handle, fc_packet_t *pkt)
{
	ql_adapter_state_t	*ha;
	int			rval = FC_TRANSPORT_ERROR;
	ql_srb_t		*sp = (ql_srb_t *)pkt->pkt_fca_private;

	/* Verify proper command. */
	ha = ql_cmd_setup(fca_handle, pkt, &rval);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, ql_cmd_setup=%xh, fcah=%ph\n",
		    rval, fca_handle);
		return (rval);
	}
	QL_PRINT_3(CE_CONT, "(%d): started command:\n", ha->instance);
#if 0
	QL_DUMP_3((uint8_t *)&pkt->pkt_cmd_fhdr, 32,
	    sizeof (fc_frame_hdr_t) / 4);
	QL_PRINT_3(CE_CONT, "(%d): command:\n", ha->instance);
	QL_DUMP_3((uint8_t *)pkt->pkt_cmd, 8, pkt->pkt_cmdlen);
#endif

	/* Reset SRB flags. */
	sp->flags &= ~(SRB_ISP_STARTED | SRB_ISP_COMPLETED | SRB_RETRY |
	    SRB_POLL | SRB_WATCHDOG_ENABLED | SRB_ABORT | SRB_UB_CALLBACK |
	    SRB_UB_RSCN | SRB_UB_FCP | SRB_FCP_CMD_PKT | SRB_FCP_DATA_PKT |
	    SRB_FCP_RSP_PKT | SRB_IP_PKT | SRB_GENERIC_SERVICES_PKT |
	    SRB_COMMAND_TIMEOUT | SRB_ABORTING | SRB_IN_DEVICE_QUEUE |
	    SRB_IN_TOKEN_ARRAY | SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED |
	    SRB_MS_PKT | SRB_ELS_PKT);

	/* Pre-fill the response header from the command header. */
	pkt->pkt_resp_fhdr.d_id = ha->d_id.b24;
	pkt->pkt_resp_fhdr.r_ctl = R_CTL_STATUS;
	pkt->pkt_resp_fhdr.s_id = pkt->pkt_cmd_fhdr.d_id;
	pkt->pkt_resp_fhdr.f_ctl = pkt->pkt_cmd_fhdr.f_ctl;
	pkt->pkt_resp_fhdr.type = pkt->pkt_cmd_fhdr.type;

	/* Dispatch on the frame's routing control / type. */
	switch (pkt->pkt_cmd_fhdr.r_ctl) {
	case R_CTL_COMMAND:
		if (pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
			sp->flags |= SRB_FCP_CMD_PKT;
			rval = ql_fcp_scsi_cmd(ha, pkt, sp);
		}
		break;

	default:
		/* Setup response header and buffer. */
		if (pkt->pkt_rsplen) {
			bzero((void *)pkt->pkt_resp, pkt->pkt_rsplen);
		}

		switch (pkt->pkt_cmd_fhdr.r_ctl) {
		case R_CTL_UNSOL_DATA:
			/* IP-over-FC traffic. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_IS8802_SNAP) {
				sp->flags |= SRB_IP_PKT;
				rval = ql_fcp_ip_cmd(ha, pkt, sp);
			}
			break;

		case R_CTL_UNSOL_CONTROL:
			/* Generic (name/management) services. */
			if (pkt->pkt_cmd_fhdr.type == FC_TYPE_FC_SERVICES) {
				sp->flags |= SRB_GENERIC_SERVICES_PKT;
				rval = ql_fc_services(ha, pkt);
			}
			break;

		case R_CTL_SOLICITED_DATA:
		case R_CTL_STATUS:
		default:
			/* Anything else is locally rejected as unsupported. */
			pkt->pkt_state = FC_PKT_LOCAL_RJT;
			pkt->pkt_reason = FC_REASON_UNSUPPORTED;
			rval = FC_TRANSPORT_ERROR;
			EL(ha, "unknown, r_ctl=%xh\n",
			    pkt->pkt_cmd_fhdr.r_ctl);
			break;
		}
		break;
	}

	if (rval != FC_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	}

	return (rval);
}
3186 * ql_ub_alloc
3187 * Allocate buffers for unsolicited exchanges.
3189 * Input:
3190 * fca_handle = handle setup by ql_bind_port().
3191 * tokens = token array for each buffer.
3192 * size = size of each buffer.
3193 * count = pointer to number of buffers.
3194 * type = the FC-4 type the buffers are reserved for.
3195 * 1 = Extended Link Services, 5 = LLC/SNAP
3197 * Returns:
3198 * FC_FAILURE - buffers could not be allocated.
3199 * FC_TOOMANY - the FCA could not allocate the requested
3200 * number of buffers.
3201 * FC_SUCCESS - unsolicited buffers were allocated.
3202 * FC_UNBOUND - the fca_handle specified is not bound.
3204 * Context:
3205 * Kernel context.
3207 static int
3208 ql_ub_alloc(opaque_t fca_handle, uint64_t tokens[], uint32_t size,
3209 uint32_t *count, uint32_t type)
3211 ql_adapter_state_t *ha;
3212 caddr_t bufp = NULL;
3213 fc_unsol_buf_t *ubp;
3214 ql_srb_t *sp;
3215 uint32_t index;
3216 uint32_t cnt;
3217 uint32_t ub_array_index = 0;
3218 int rval = FC_SUCCESS;
3219 int ub_updated = FALSE;
3221 /* Check handle. */
3222 ha = ql_fca_handle_to_state(fca_handle);
3223 if (ha == NULL) {
3224 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3225 (void *)fca_handle);
3226 return (FC_UNBOUND);
3228 QL_PRINT_3(CE_CONT, "(%d,%d): started, count = %xh\n",
3229 ha->instance, ha->vp_index, *count);
3231 QL_PM_LOCK(ha);
3232 if (ha->power_level != PM_LEVEL_D0) {
3233 QL_PM_UNLOCK(ha);
3234 QL_PRINT_3(CE_CONT, "(%d,%d): down done\n", ha->instance,
3235 ha->vp_index);
3236 return (FC_FAILURE);
3238 QL_PM_UNLOCK(ha);
3240 /* Acquire adapter state lock. */
3241 ADAPTER_STATE_LOCK(ha);
3243 /* Check the count. */
3244 if ((*count + ha->ub_allocated) > QL_UB_LIMIT) {
3245 *count = 0;
3246 EL(ha, "failed, FC_TOOMANY\n");
3247 rval = FC_TOOMANY;
3251 * reset ub_array_index
3253 ub_array_index = 0;
3256 * Now proceed to allocate any buffers required
3258 for (index = 0; index < *count && rval == FC_SUCCESS; index++) {
3259 /* Allocate all memory needed. */
3260 ubp = (fc_unsol_buf_t *)kmem_zalloc(sizeof (fc_unsol_buf_t),
3261 KM_SLEEP);
3262 if (ubp == NULL) {
3263 EL(ha, "failed, FC_FAILURE\n");
3264 rval = FC_FAILURE;
3265 } else {
3266 sp = kmem_zalloc(sizeof (ql_srb_t), KM_SLEEP);
3267 if (sp == NULL) {
3268 kmem_free(ubp, sizeof (fc_unsol_buf_t));
3269 rval = FC_FAILURE;
3270 } else {
3271 if (type == FC_TYPE_IS8802_SNAP) {
3272 if (ql_get_dma_mem(ha,
3273 &sp->ub_buffer, size,
3274 LITTLE_ENDIAN_DMA,
3275 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
3276 rval = FC_FAILURE;
3277 kmem_free(ubp,
3278 sizeof (fc_unsol_buf_t));
3279 kmem_free(sp,
3280 sizeof (ql_srb_t));
3281 } else {
3282 bufp = sp->ub_buffer.bp;
3283 sp->ub_size = size;
3285 } else {
3286 bufp = kmem_zalloc(size, KM_SLEEP);
3287 if (bufp == NULL) {
3288 rval = FC_FAILURE;
3289 kmem_free(ubp,
3290 sizeof (fc_unsol_buf_t));
3291 kmem_free(sp,
3292 sizeof (ql_srb_t));
3293 } else {
3294 sp->ub_size = size;
3300 if (rval == FC_SUCCESS) {
3301 /* Find next available slot. */
3302 QL_UB_LOCK(ha);
3303 while (ha->ub_array[ub_array_index] != NULL) {
3304 ub_array_index++;
3307 ubp->ub_fca_private = (void *)sp;
3309 /* init cmd links */
3310 sp->cmd.base_address = sp;
3311 sp->cmd.prev = NULL;
3312 sp->cmd.next = NULL;
3313 sp->cmd.head = NULL;
3315 /* init wdg links */
3316 sp->wdg.base_address = sp;
3317 sp->wdg.prev = NULL;
3318 sp->wdg.next = NULL;
3319 sp->wdg.head = NULL;
3320 sp->ha = ha;
3322 ubp->ub_buffer = bufp;
3323 ubp->ub_bufsize = size;
3324 ubp->ub_port_handle = fca_handle;
3325 ubp->ub_token = ub_array_index;
3327 /* Save the token. */
3328 tokens[index] = ub_array_index;
3330 /* Setup FCA private information. */
3331 sp->ub_type = type;
3332 sp->handle = ub_array_index;
3333 sp->flags |= SRB_UB_IN_FCA;
3335 ha->ub_array[ub_array_index] = ubp;
3336 ha->ub_allocated++;
3337 ub_updated = TRUE;
3338 QL_UB_UNLOCK(ha);
3342 /* Release adapter state lock. */
3343 ADAPTER_STATE_UNLOCK(ha);
3345 /* IP buffer. */
3346 if (ub_updated) {
3347 if ((type == FC_TYPE_IS8802_SNAP) &&
3348 (!(CFG_IST(ha, (CFG_CTRL_6322 | CFG_CTRL_2581))))) {
3350 ADAPTER_STATE_LOCK(ha);
3351 ha->flags |= IP_ENABLED;
3352 ADAPTER_STATE_UNLOCK(ha);
3354 if (!(ha->flags & IP_INITIALIZED)) {
3355 if (CFG_IST(ha, CFG_CTRL_2422)) {
3356 ha->ip_init_ctrl_blk.cb24.mtu_size[0] =
3357 LSB(ql_ip_mtu);
3358 ha->ip_init_ctrl_blk.cb24.mtu_size[1] =
3359 MSB(ql_ip_mtu);
3360 ha->ip_init_ctrl_blk.cb24.buf_size[0] =
3361 LSB(size);
3362 ha->ip_init_ctrl_blk.cb24.buf_size[1] =
3363 MSB(size);
3365 cnt = CHAR_TO_SHORT(
3366 ha->ip_init_ctrl_blk.cb24.cc[0],
3367 ha->ip_init_ctrl_blk.cb24.cc[1]);
3369 if (cnt < *count) {
3370 ha->ip_init_ctrl_blk.cb24.cc[0]
3371 = LSB(*count);
3372 ha->ip_init_ctrl_blk.cb24.cc[1]
3373 = MSB(*count);
3375 } else {
3376 ha->ip_init_ctrl_blk.cb.mtu_size[0] =
3377 LSB(ql_ip_mtu);
3378 ha->ip_init_ctrl_blk.cb.mtu_size[1] =
3379 MSB(ql_ip_mtu);
3380 ha->ip_init_ctrl_blk.cb.buf_size[0] =
3381 LSB(size);
3382 ha->ip_init_ctrl_blk.cb.buf_size[1] =
3383 MSB(size);
3385 cnt = CHAR_TO_SHORT(
3386 ha->ip_init_ctrl_blk.cb.cc[0],
3387 ha->ip_init_ctrl_blk.cb.cc[1]);
3389 if (cnt < *count) {
3390 ha->ip_init_ctrl_blk.cb.cc[0] =
3391 LSB(*count);
3392 ha->ip_init_ctrl_blk.cb.cc[1] =
3393 MSB(*count);
3397 (void) ql_initialize_ip(ha);
3399 ql_isp_rcvbuf(ha);
3403 if (rval != FC_SUCCESS) {
3404 EL(ha, "failed=%xh\n", rval);
3405 } else {
3406 /*EMPTY*/
3407 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3408 ha->vp_index);
3410 return (rval);
3414 * ql_ub_free
3415 * Free unsolicited buffers.
3417 * Input:
3418 * fca_handle = handle setup by ql_bind_port().
3419 * count = number of buffers.
3420 * tokens = token array for each buffer.
3422 * Returns:
3423 * FC_SUCCESS - the requested buffers have been freed.
3424 * FC_UNBOUND - the fca_handle specified is not bound.
3425 * FC_UB_BADTOKEN - an invalid token was encountered.
3426 * No buffers have been released.
3428 * Context:
3429 * Kernel context.
3431 static int
3432 ql_ub_free(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3434 ql_adapter_state_t *ha;
3435 ql_srb_t *sp;
3436 uint32_t index;
3437 uint64_t ub_array_index;
3438 int rval = FC_SUCCESS;
3440 /* Check handle. */
3441 ha = ql_fca_handle_to_state(fca_handle);
3442 if (ha == NULL) {
3443 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3444 (void *)fca_handle);
3445 return (FC_UNBOUND);
3447 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3449 /* Acquire adapter state lock. */
3450 ADAPTER_STATE_LOCK(ha);
3452 /* Check all returned tokens. */
3453 for (index = 0; index < count; index++) {
3454 fc_unsol_buf_t *ubp;
3456 /* Check the token range. */
3457 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3458 EL(ha, "failed, FC_UB_BADTOKEN\n");
3459 rval = FC_UB_BADTOKEN;
3460 break;
3463 /* Check the unsolicited buffer array. */
3464 QL_UB_LOCK(ha);
3465 ubp = ha->ub_array[ub_array_index];
3467 if (ubp == NULL) {
3468 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3469 rval = FC_UB_BADTOKEN;
3470 QL_UB_UNLOCK(ha);
3471 break;
3474 /* Check the state of the unsolicited buffer. */
3475 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3476 sp->flags |= SRB_UB_FREE_REQUESTED;
3478 while (!(sp->flags & SRB_UB_IN_FCA) ||
3479 (sp->flags & (SRB_UB_CALLBACK | SRB_UB_ACQUIRED))) {
3480 QL_UB_UNLOCK(ha);
3481 ADAPTER_STATE_UNLOCK(ha);
3482 delay(drv_usectohz(100000));
3483 ADAPTER_STATE_LOCK(ha);
3484 QL_UB_LOCK(ha);
3486 ha->ub_array[ub_array_index] = NULL;
3487 QL_UB_UNLOCK(ha);
3488 ql_free_unsolicited_buffer(ha, ubp);
3491 if (rval == FC_SUCCESS) {
3493 * Signal any pending hardware reset when there are
3494 * no more unsolicited buffers in use.
3496 if (ha->ub_allocated == 0) {
3497 cv_broadcast(&ha->pha->cv_ub);
3501 /* Release adapter state lock. */
3502 ADAPTER_STATE_UNLOCK(ha);
3504 if (rval != FC_SUCCESS) {
3505 EL(ha, "failed=%xh\n", rval);
3506 } else {
3507 /*EMPTY*/
3508 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3510 return (rval);
3514 * ql_ub_release
3515 * Release unsolicited buffers from FC Transport
3516 * to FCA for future use.
3518 * Input:
3519 * fca_handle = handle setup by ql_bind_port().
3520 * count = number of buffers.
3521 * tokens = token array for each buffer.
3523 * Returns:
3524 * FC_SUCCESS - the requested buffers have been released.
3525 * FC_UNBOUND - the fca_handle specified is not bound.
3526 * FC_UB_BADTOKEN - an invalid token was encountered.
3527 * No buffers have been released.
3529 * Context:
3530 * Kernel context.
3532 static int
3533 ql_ub_release(opaque_t fca_handle, uint32_t count, uint64_t tokens[])
3535 ql_adapter_state_t *ha;
3536 ql_srb_t *sp;
3537 uint32_t index;
3538 uint64_t ub_array_index;
3539 int rval = FC_SUCCESS;
3540 int ub_ip_updated = FALSE;
3542 /* Check handle. */
3543 ha = ql_fca_handle_to_state(fca_handle);
3544 if (ha == NULL) {
3545 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3546 (void *)fca_handle);
3547 return (FC_UNBOUND);
3549 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
3551 /* Acquire adapter state lock. */
3552 ADAPTER_STATE_LOCK(ha);
3553 QL_UB_LOCK(ha);
3555 /* Check all returned tokens. */
3556 for (index = 0; index < count; index++) {
3557 /* Check the token range. */
3558 if ((ub_array_index = tokens[index]) >= QL_UB_LIMIT) {
3559 EL(ha, "failed, FC_UB_BADTOKEN\n");
3560 rval = FC_UB_BADTOKEN;
3561 break;
3564 /* Check the unsolicited buffer array. */
3565 if (ha->ub_array[ub_array_index] == NULL) {
3566 EL(ha, "failed, FC_UB_BADTOKEN-2\n");
3567 rval = FC_UB_BADTOKEN;
3568 break;
3571 /* Check the state of the unsolicited buffer. */
3572 sp = ha->ub_array[ub_array_index]->ub_fca_private;
3573 if (sp->flags & SRB_UB_IN_FCA) {
3574 EL(ha, "failed, FC_UB_BADTOKEN-3\n");
3575 rval = FC_UB_BADTOKEN;
3576 break;
3580 /* If all tokens checkout, release the buffers. */
3581 if (rval == FC_SUCCESS) {
3582 /* Check all returned tokens. */
3583 for (index = 0; index < count; index++) {
3584 fc_unsol_buf_t *ubp;
3586 ub_array_index = tokens[index];
3587 ubp = ha->ub_array[ub_array_index];
3588 sp = ubp->ub_fca_private;
3590 ubp->ub_resp_flags = 0;
3591 sp->flags &= ~(SRB_UB_ACQUIRED | SRB_UB_CALLBACK);
3592 sp->flags |= SRB_UB_IN_FCA;
3594 /* IP buffer. */
3595 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
3596 ub_ip_updated = TRUE;
3601 QL_UB_UNLOCK(ha);
3602 /* Release adapter state lock. */
3603 ADAPTER_STATE_UNLOCK(ha);
3606 * XXX: We should call ql_isp_rcvbuf() to return a
3607 * buffer to ISP only if the number of buffers fall below
3608 * the low water mark.
3610 if (ub_ip_updated) {
3611 ql_isp_rcvbuf(ha);
3614 if (rval != FC_SUCCESS) {
3615 EL(ha, "failed, rval = %xh\n", rval);
3616 } else {
3617 /*EMPTY*/
3618 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
3620 return (rval);
/*
 * ql_abort
 *	Abort a packet.
 *
 * Input:
 *	fca_handle = handle setup by ql_bind_port().
 *	pkt = pointer to fc_packet.
 *	flags = KM_SLEEP flag.
 *
 * Returns:
 *	FC_SUCCESS - the packet has successfully aborted.
 *	FC_ABORTED - the packet has successfully aborted.
 *	FC_ABORTING - the packet is being aborted.
 *	FC_ABORT_FAILED - the packet could not be aborted.
 *	FC_TRANSPORT_ERROR - a transport error occurred while attempting
 *	to abort the packet.
 *	FC_BADEXCHANGE - no packet found.
 *	FC_UNBOUND - the fca_handle specified is not bound.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_abort(opaque_t fca_handle, fc_packet_t *pkt, int flags)
{
	port_id_t		d_id;
	ql_link_t		*link;
	ql_adapter_state_t	*ha, *pha;
	ql_srb_t		*sp;
	ql_tgt_t		*tq;
	ql_lun_t		*lq;
	int			rval = FC_ABORTED;

	/* Validate the port handle before touching any adapter state. */
	ha = ql_fca_handle_to_state(fca_handle);
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
		    (void *)fca_handle);
		return (FC_UNBOUND);
	}

	pha = ha->pha;

	QL_PRINT_3(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);

	/* Get target queue pointer. */
	d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	tq = ql_d_id_to_queue(ha, d_id);

	/* No target, or loop down: nothing can be aborted. */
	if ((tq == NULL) || (pha->task_daemon_flags & LOOP_DOWN)) {
		if (tq == NULL) {
			EL(ha, "failed, FC_TRANSPORT_ERROR\n");
			rval = FC_TRANSPORT_ERROR;
		} else {
			EL(ha, "failed, FC_OFFLINE\n");
			rval = FC_OFFLINE;
		}
		return (rval);
	}

	sp = (ql_srb_t *)pkt->pkt_fca_private;
	lq = sp->lun_queue;

	/* Set poll flag if sleep wanted. */
	if (flags == KM_SLEEP) {
		sp->flags |= SRB_POLL;
	}

	/*
	 * Acquire target queue lock, then the request ring lock.
	 * NOTE: lock release order below differs per branch — do not
	 * reorder; each branch drops REQUEST_RING before DEVICE_QUEUE
	 * at the point it no longer needs ring access.
	 */
	DEVICE_QUEUE_LOCK(tq);
	REQUEST_RING_LOCK(ha);

	/* If command not already started. */
	if (!(sp->flags & SRB_ISP_STARTED)) {
		/*
		 * Check pending queue for command.  sp is reused as the
		 * scan cursor and is NULL afterwards if not found.
		 */
		sp = NULL;
		for (link = pha->pending_cmds.first; link != NULL;
		    link = link->next) {
			sp = link->base_address;
			if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
				/* Remove srb from q. */
				ql_remove_link(&pha->pending_cmds, &sp->cmd);
				break;
			} else {
				sp = NULL;
			}
		}
		REQUEST_RING_UNLOCK(ha);

		if (sp == NULL) {
			/* Check for cmd on device queue. */
			for (link = lq->cmd.first; link != NULL;
			    link = link->next) {
				sp = link->base_address;
				if (sp == (ql_srb_t *)pkt->pkt_fca_private) {
					/* Remove srb from q. */
					ql_remove_link(&lq->cmd, &sp->cmd);
					break;
				} else {
					sp = NULL;
				}
			}
		}
		/* Release device lock */
		DEVICE_QUEUE_UNLOCK(tq);

		/* If command on target queue. */
		if (sp != NULL) {
			sp->flags &= ~SRB_IN_DEVICE_QUEUE;

			/* Set return status */
			pkt->pkt_reason = CS_ABORTED;

			/* Complete the command locally; never reached HW. */
			sp->cmd.next = NULL;
			ql_done(&sp->cmd);
			rval = FC_ABORTED;
		} else {
			EL(ha, "failed, FC_BADEXCHANGE\n");
			rval = FC_BADEXCHANGE;
		}
	} else if (sp->flags & SRB_ISP_COMPLETED) {
		/* Already completed by the ISP; too late to abort. */
		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		EL(ha, "failed, already done, FC_FAILURE\n");
		rval = FC_FAILURE;
	} else if ((sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_SOLICITED_DATA) ||
	    (sp->pkt->pkt_cmd_fhdr.r_ctl == R_CTL_STATUS)) {
		/*
		 * If here, target data/resp ctio is with Fw.
		 * Since firmware is supposed to terminate such I/Os
		 * with an error, we need not do any thing. If FW
		 * decides not to terminate those IOs and simply keep
		 * quite then we need to initiate cleanup here by
		 * calling ql_done.
		 */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);
		rval = FC_ABORTED;
	} else {
		/*
		 * Command is with the firmware: invalidate its request
		 * ring entry (if still present), then issue an explicit
		 * abort mailbox command.
		 */
		request_t	*ep = pha->request_ring_bp;
		uint16_t	cnt;

		if (sp->handle != 0) {
			for (cnt = 0; cnt < REQUEST_ENTRY_CNT; cnt++) {
				if (sp->handle == ddi_get32(
				    pha->hba_buf.acc_handle, &ep->handle)) {
					ep->entry_type = INVALID_ENTRY_TYPE;
					break;
				}
				ep++;
			}
		}

		/* Release device queue lock. */
		REQUEST_RING_UNLOCK(ha);
		DEVICE_QUEUE_UNLOCK(tq);

		sp->flags |= SRB_ABORTING;
		(void) ql_abort_command(ha, sp);
		pkt->pkt_reason = CS_ABORTED;
		rval = FC_ABORTED;
	}

	QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance, ha->vp_index);

	return (rval);
}
3792 * ql_reset
3793 * Reset link or hardware.
3795 * Input:
3796 * fca_handle = handle setup by ql_bind_port().
3797 * cmd = reset type command.
3799 * Returns:
3800 * FC_SUCCESS - reset has successfully finished.
3801 * FC_UNBOUND - the fca_handle specified is not bound.
3802 * FC_FAILURE - reset failed.
3804 * Context:
3805 * Kernel context.
3807 static int
3808 ql_reset(opaque_t fca_handle, uint32_t cmd)
3810 ql_adapter_state_t *ha;
3811 int rval = FC_SUCCESS, rval2;
3813 ha = ql_fca_handle_to_state(fca_handle);
3814 if (ha == NULL) {
3815 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
3816 (void *)fca_handle);
3817 return (FC_UNBOUND);
3820 QL_PRINT_3(CE_CONT, "(%d,%d): started, cmd=%d\n", ha->instance,
3821 ha->vp_index, cmd);
3823 switch (cmd) {
3824 case FC_FCA_CORE:
3825 /* dump firmware core if specified. */
3826 if (ha->vp_index == 0) {
3827 if (ql_dump_firmware(ha) != QL_SUCCESS) {
3828 EL(ha, "failed, FC_FAILURE\n");
3829 rval = FC_FAILURE;
3832 break;
3833 case FC_FCA_LINK_RESET:
3834 if (!(ha->pha->task_daemon_flags & LOOP_DOWN)) {
3835 if (ql_loop_reset(ha) != QL_SUCCESS) {
3836 EL(ha, "failed, FC_FAILURE-2\n");
3837 rval = FC_FAILURE;
3840 break;
3841 case FC_FCA_RESET_CORE:
3842 case FC_FCA_RESET:
3843 /* if dump firmware core if specified. */
3844 if (cmd == FC_FCA_RESET_CORE) {
3845 if (ha->vp_index != 0) {
3846 rval2 = ha->pha->task_daemon_flags & LOOP_DOWN
3847 ? QL_SUCCESS : ql_loop_reset(ha);
3848 } else {
3849 rval2 = ql_dump_firmware(ha);
3851 if (rval2 != QL_SUCCESS) {
3852 EL(ha, "failed, FC_FAILURE-3\n");
3853 rval = FC_FAILURE;
3857 /* Free up all unsolicited buffers. */
3858 if (ha->ub_allocated != 0) {
3859 /* Inform to release buffers. */
3860 ha->state = FC_PORT_SPEED_MASK(ha->state);
3861 ha->state |= FC_STATE_RESET_REQUESTED;
3862 if (ha->flags & FCA_BOUND) {
3863 (ha->bind_info.port_statec_cb)
3864 (ha->bind_info.port_handle,
3865 ha->state);
3869 ha->state = FC_PORT_SPEED_MASK(ha->state);
3871 /* All buffers freed */
3872 if (ha->ub_allocated == 0) {
3873 /* Hardware reset. */
3874 if (cmd == FC_FCA_RESET) {
3875 if (ha->vp_index == 0) {
3876 (void) ql_abort_isp(ha);
3877 } else if (!(ha->pha->task_daemon_flags &
3878 LOOP_DOWN)) {
3879 (void) ql_loop_reset(ha);
3883 /* Inform that the hardware has been reset */
3884 ha->state |= FC_STATE_RESET;
3885 } else {
3887 * the port driver expects an online if
3888 * buffers are not freed.
3890 if (ha->topology & QL_LOOP_CONNECTION) {
3891 ha->state |= FC_STATE_LOOP;
3892 } else {
3893 ha->state |= FC_STATE_ONLINE;
3897 TASK_DAEMON_LOCK(ha);
3898 ha->task_daemon_flags |= FC_STATE_CHANGE;
3899 TASK_DAEMON_UNLOCK(ha);
3901 ql_awaken_task_daemon(ha, NULL, FC_STATE_CHANGE, 0);
3903 break;
3904 default:
3905 EL(ha, "unknown cmd=%xh\n", cmd);
3906 break;
3909 if (rval != FC_SUCCESS) {
3910 EL(ha, "cmd=%xh, failed=%xh\n", cmd, rval);
3911 } else {
3912 /*EMPTY*/
3913 QL_PRINT_3(CE_CONT, "(%d,%d): done\n", ha->instance,
3914 ha->vp_index);
3917 return (rval);
3921 * ql_port_manage
3922 * Perform port management or diagnostics.
3924 * Input:
3925 * fca_handle = handle setup by ql_bind_port().
3926 * cmd = pointer to command structure.
3928 * Returns:
3929 * FC_SUCCESS - the request completed successfully.
3930 * FC_FAILURE - the request did not complete successfully.
3931 * FC_UNBOUND - the fca_handle specified is not bound.
3933 * Context:
3934 * Kernel context.
3936 static int
3937 ql_port_manage(opaque_t fca_handle, fc_fca_pm_t *cmd)
3939 clock_t timer;
3940 uint16_t index;
3941 uint32_t *bp;
3942 port_id_t d_id;
3943 ql_link_t *link;
3944 ql_adapter_state_t *ha, *pha;
3945 ql_tgt_t *tq;
3946 dma_mem_t buffer_xmt, buffer_rcv;
3947 size_t length;
3948 uint32_t cnt;
3949 char buf[80];
3950 lbp_t *lb;
3951 ql_mbx_data_t mr;
3952 app_mbx_cmd_t *mcp;
3953 int i0;
3954 uint8_t *bptr;
3955 int rval2, rval = FC_SUCCESS;
3956 uint32_t opcode;
3957 uint32_t set_flags = 0;
3959 ha = ql_fca_handle_to_state(fca_handle);
3960 if (ha == NULL) {
3961 QL_PRINT_2(CE_CONT, ": failed, no adapter=%ph\n",
3962 (void *)fca_handle);
3963 return (FC_UNBOUND);
3965 pha = ha->pha;
3967 QL_PRINT_3(CE_CONT, "(%d): started=%xh\n", ha->instance,
3968 cmd->pm_cmd_code);
3970 ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);
3973 * Wait for all outstanding commands to complete
3975 index = (uint16_t)ql_wait_outstanding(ha);
3977 if (index != MAX_OUTSTANDING_COMMANDS) {
3978 ql_awaken_task_daemon(ha, NULL, 0, DRIVER_STALL);
3979 ql_restart_queues(ha);
3980 EL(ha, "failed, FC_TRAN_BUSY\n");
3981 return (FC_TRAN_BUSY);
3984 switch (cmd->pm_cmd_code) {
3985 case FC_PORT_BYPASS:
3986 d_id.b24 = *cmd->pm_cmd_buf;
3987 tq = ql_d_id_to_queue(ha, d_id);
3988 if (tq == NULL || ql_loop_port_bypass(ha, tq) != QL_SUCCESS) {
3989 EL(ha, "failed, FC_PORT_BYPASS FC_FAILURE\n");
3990 rval = FC_FAILURE;
3992 break;
3993 case FC_PORT_UNBYPASS:
3994 d_id.b24 = *cmd->pm_cmd_buf;
3995 tq = ql_d_id_to_queue(ha, d_id);
3996 if (tq == NULL || ql_loop_port_enable(ha, tq) != QL_SUCCESS) {
3997 EL(ha, "failed, FC_PORT_UNBYPASS FC_FAILURE\n");
3998 rval = FC_FAILURE;
4000 break;
4001 case FC_PORT_GET_FW_REV:
4002 (void) sprintf(buf, "%d.%d.%d", pha->fw_major_version,
4003 pha->fw_minor_version, pha->fw_subminor_version);
4004 length = strlen(buf) + 1;
4005 if (cmd->pm_data_len < length) {
4006 cmd->pm_data_len = length;
4007 EL(ha, "failed, FC_PORT_GET_FW_REV FC_FAILURE\n");
4008 rval = FC_FAILURE;
4009 } else {
4010 (void) strcpy(cmd->pm_data_buf, buf);
4012 break;
4014 case FC_PORT_GET_FCODE_REV: {
4015 caddr_t fcode_ver_buf = NULL;
4017 i0 = 0;
4018 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
4019 rval2 = ddi_getlongprop(DDI_DEV_T_ANY, ha->dip,
4020 DDI_PROP_DONTPASS | DDI_PROP_CANSLEEP, "version",
4021 (caddr_t)&fcode_ver_buf, &i0);
4022 length = (uint_t)i0;
4024 if (rval2 != DDI_PROP_SUCCESS) {
4025 EL(ha, "failed, getting version = %xh\n", rval2);
4026 length = 20;
4027 fcode_ver_buf = kmem_alloc(length, KM_SLEEP);
4028 if (fcode_ver_buf != NULL) {
4029 (void) sprintf(fcode_ver_buf,
4030 "NO FCODE FOUND");
4034 if (cmd->pm_data_len < length) {
4035 EL(ha, "length error, FC_PORT_GET_FCODE_REV "
4036 "dst=%ld, src=%ld\n", cmd->pm_data_len, length);
4037 cmd->pm_data_len = length;
4038 rval = FC_FAILURE;
4039 } else if (fcode_ver_buf != NULL) {
4040 bcopy((void *)fcode_ver_buf, (void *)cmd->pm_data_buf,
4041 length);
4044 if (fcode_ver_buf != NULL) {
4045 kmem_free(fcode_ver_buf, length);
4047 break;
4050 case FC_PORT_GET_DUMP:
4051 QL_DUMP_LOCK(pha);
4052 if (cmd->pm_data_len < (size_t)pha->risc_dump_size) {
4053 EL(ha, "failed, FC_PORT_GET_DUMP incorrect "
4054 "length=%lxh\n", cmd->pm_data_len);
4055 cmd->pm_data_len = pha->risc_dump_size;
4056 rval = FC_FAILURE;
4057 } else if (pha->ql_dump_state & QL_DUMPING) {
4058 EL(ha, "failed, FC_PORT_GET_DUMP FC_TRAN_BUSY\n");
4059 rval = FC_TRAN_BUSY;
4060 } else if (pha->ql_dump_state & QL_DUMP_VALID) {
4061 (void) ql_ascii_fw_dump(ha, cmd->pm_data_buf);
4062 pha->ql_dump_state |= QL_DUMP_UPLOADED;
4063 } else {
4064 EL(ha, "failed, FC_PORT_GET_DUMP no dump file\n");
4065 rval = FC_FAILURE;
4067 QL_DUMP_UNLOCK(pha);
4068 break;
4069 case FC_PORT_FORCE_DUMP:
4070 PORTMANAGE_LOCK(ha);
4071 if (ql_dump_firmware(ha) != QL_SUCCESS) {
4072 EL(ha, "failed, FC_PORT_FORCE_DUMP FC_FAILURE\n");
4073 rval = FC_FAILURE;
4075 PORTMANAGE_UNLOCK(ha);
4076 break;
4077 case FC_PORT_DOWNLOAD_FW:
4078 PORTMANAGE_LOCK(ha);
4079 if (CFG_IST(ha, CFG_CTRL_24258081)) {
4080 if (ql_24xx_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4081 (uint32_t)cmd->pm_data_len,
4082 ha->flash_fw_addr << 2) != QL_SUCCESS) {
4083 EL(ha, "failed, FC_PORT_DOWNLOAD_FW\n");
4084 rval = FC_FAILURE;
4086 ql_reset_chip(ha);
4087 set_flags |= ISP_ABORT_NEEDED;
4088 } else {
4089 /* Save copy of the firmware. */
4090 if (pha->risc_code != NULL) {
4091 kmem_free(pha->risc_code, pha->risc_code_size);
4092 pha->risc_code = NULL;
4093 pha->risc_code_size = 0;
4096 pha->risc_code = kmem_alloc(cmd->pm_data_len,
4097 KM_SLEEP);
4098 if (pha->risc_code != NULL) {
4099 pha->risc_code_size =
4100 (uint32_t)cmd->pm_data_len;
4101 bcopy(cmd->pm_data_buf, pha->risc_code,
4102 cmd->pm_data_len);
4104 /* Do abort to force reload. */
4105 ql_reset_chip(ha);
4106 if (ql_abort_isp(ha) != QL_SUCCESS) {
4107 kmem_free(pha->risc_code,
4108 pha->risc_code_size);
4109 pha->risc_code = NULL;
4110 pha->risc_code_size = 0;
4111 ql_reset_chip(ha);
4112 (void) ql_abort_isp(ha);
4113 EL(ha, "failed, FC_PORT_DOWNLOAD_FW"
4114 " FC_FAILURE\n");
4115 rval = FC_FAILURE;
4119 PORTMANAGE_UNLOCK(ha);
4120 break;
4121 case FC_PORT_GET_DUMP_SIZE:
4122 bp = (uint32_t *)cmd->pm_data_buf;
4123 *bp = pha->risc_dump_size;
4124 break;
4125 case FC_PORT_DIAG:
4127 * Prevents concurrent diags
4129 PORTMANAGE_LOCK(ha);
4131 /* Wait for suspension to end. */
4132 for (timer = 0; timer < 3000 &&
4133 pha->task_daemon_flags & QL_LOOP_TRANSITION; timer++) {
4134 ql_delay(ha, 10000);
4137 if (pha->task_daemon_flags & QL_LOOP_TRANSITION) {
4138 EL(ha, "failed, FC_TRAN_BUSY-2\n");
4139 rval = FC_TRAN_BUSY;
4140 PORTMANAGE_UNLOCK(ha);
4141 break;
4144 switch (cmd->pm_cmd_flags) {
4145 case QL_DIAG_EXEFMW:
4146 if (ql_start_firmware(ha) != QL_SUCCESS) {
4147 EL(ha, "failed, QL_DIAG_EXEFMW FC_FAILURE\n");
4148 rval = FC_FAILURE;
4150 break;
4151 case QL_DIAG_CHKCMDQUE:
4152 for (i0 = 1, cnt = 0; i0 < MAX_OUTSTANDING_COMMANDS;
4153 i0++) {
4154 cnt += (pha->outstanding_cmds[i0] != NULL);
4156 if (cnt != 0) {
4157 EL(ha, "failed, QL_DIAG_CHKCMDQUE "
4158 "FC_FAILURE\n");
4159 rval = FC_FAILURE;
4161 break;
4162 case QL_DIAG_FMWCHKSUM:
4163 if (ql_verify_checksum(ha) != QL_SUCCESS) {
4164 EL(ha, "failed, QL_DIAG_FMWCHKSUM "
4165 "FC_FAILURE\n");
4166 rval = FC_FAILURE;
4168 break;
4169 case QL_DIAG_SLFTST:
4170 if (ql_online_selftest(ha) != QL_SUCCESS) {
4171 EL(ha, "failed, QL_DIAG_SLFTST FC_FAILURE\n");
4172 rval = FC_FAILURE;
4174 ql_reset_chip(ha);
4175 set_flags |= ISP_ABORT_NEEDED;
4176 break;
4177 case QL_DIAG_REVLVL:
4178 if (cmd->pm_stat_len <
4179 sizeof (ql_adapter_revlvl_t)) {
4180 EL(ha, "failed, QL_DIAG_REVLVL FC_NOMEM, "
4181 "slen=%lxh, rlvllen=%lxh\n",
4182 cmd->pm_stat_len,
4183 sizeof (ql_adapter_revlvl_t));
4184 rval = FC_NOMEM;
4185 } else {
4186 bcopy((void *)&(pha->adapter_stats->revlvl),
4187 cmd->pm_stat_buf,
4188 (size_t)cmd->pm_stat_len);
4189 cmd->pm_stat_len =
4190 sizeof (ql_adapter_revlvl_t);
4192 break;
4193 case QL_DIAG_LPBMBX:
4195 if (cmd->pm_data_len != sizeof (struct app_mbx_cmd)) {
4196 EL(ha, "failed, QL_DIAG_LPBMBX "
4197 "FC_INVALID_REQUEST, pmlen=%lxh, "
4198 "reqd=%lxh\n", cmd->pm_data_len,
4199 sizeof (struct app_mbx_cmd));
4200 rval = FC_INVALID_REQUEST;
4201 break;
4204 * Don't do the wrap test on a 2200 when the
4205 * firmware is running.
4207 if (!CFG_IST(ha, CFG_CTRL_2200)) {
4208 mcp = (app_mbx_cmd_t *)cmd->pm_data_buf;
4209 mr.mb[1] = mcp->mb[1];
4210 mr.mb[2] = mcp->mb[2];
4211 mr.mb[3] = mcp->mb[3];
4212 mr.mb[4] = mcp->mb[4];
4213 mr.mb[5] = mcp->mb[5];
4214 mr.mb[6] = mcp->mb[6];
4215 mr.mb[7] = mcp->mb[7];
4217 bcopy(&mr.mb[0], &mr.mb[10],
4218 sizeof (uint16_t) * 8);
4220 if (ql_mbx_wrap_test(ha, &mr) != QL_SUCCESS) {
4221 EL(ha, "failed, QL_DIAG_LPBMBX "
4222 "FC_FAILURE\n");
4223 rval = FC_FAILURE;
4224 break;
4225 } else {
4226 for (i0 = 1; i0 < 8; i0++) {
4227 if (mr.mb[i0] !=
4228 mr.mb[i0 + 10]) {
4229 EL(ha, "failed, "
4230 "QL_DIAG_LPBMBX "
4231 "FC_FAILURE-2\n");
4232 rval = FC_FAILURE;
4233 break;
4238 if (rval == FC_FAILURE) {
4239 (void) ql_flash_errlog(ha,
4240 FLASH_ERRLOG_ISP_ERR, 0,
4241 RD16_IO_REG(ha, hccr),
4242 RD16_IO_REG(ha, istatus));
4243 set_flags |= ISP_ABORT_NEEDED;
4246 break;
4247 case QL_DIAG_LPBDTA:
4249 * For loopback data, we receive the
4250 * data back in pm_stat_buf. This provides
4251 * the user an opportunity to compare the
4252 * transmitted and received data.
4254 * NB: lb->options are:
4255 * 0 --> Ten bit loopback
4256 * 1 --> One bit loopback
4257 * 2 --> External loopback
4259 if (cmd->pm_data_len > 65536) {
4260 rval = FC_TOOMANY;
4261 EL(ha, "failed, QL_DIAG_LPBDTA "
4262 "FC_TOOMANY=%lxh\n", cmd->pm_data_len);
4263 break;
4265 if (ql_get_dma_mem(ha, &buffer_xmt,
4266 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4267 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4268 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM\n");
4269 rval = FC_NOMEM;
4270 break;
4272 if (ql_get_dma_mem(ha, &buffer_rcv,
4273 (uint32_t)cmd->pm_data_len, LITTLE_ENDIAN_DMA,
4274 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4275 EL(ha, "failed, QL_DIAG_LPBDTA FC_NOMEM-2\n");
4276 rval = FC_NOMEM;
4277 break;
4279 ddi_rep_put8(buffer_xmt.acc_handle,
4280 (uint8_t *)cmd->pm_data_buf,
4281 (uint8_t *)buffer_xmt.bp,
4282 cmd->pm_data_len, DDI_DEV_AUTOINCR);
4284 /* 22xx's adapter must be in loop mode for test. */
4285 if (CFG_IST(ha, CFG_CTRL_2200)) {
4286 bptr = &ha->init_ctrl_blk.cb.add_fw_opt[0];
4287 if (ha->flags & POINT_TO_POINT ||
4288 (ha->task_daemon_flags & LOOP_DOWN &&
4289 *bptr & (BIT_6 | BIT_5 | BIT_4))) {
4290 cnt = *bptr;
4291 *bptr = (uint8_t)
4292 (*bptr & ~(BIT_6|BIT_5|BIT_4));
4293 (void) ql_abort_isp(ha);
4294 *bptr = (uint8_t)cnt;
4298 /* Shutdown IP. */
4299 if (pha->flags & IP_INITIALIZED) {
4300 (void) ql_shutdown_ip(pha);
4303 lb = (lbp_t *)cmd->pm_cmd_buf;
4304 lb->transfer_count =
4305 (uint32_t)cmd->pm_data_len;
4306 lb->transfer_segment_count = 0;
4307 lb->receive_segment_count = 0;
4308 lb->transfer_data_address =
4309 buffer_xmt.cookie.dmac_address;
4310 lb->receive_data_address =
4311 buffer_rcv.cookie.dmac_address;
4313 if (ql_loop_back(ha, 0, lb,
4314 buffer_xmt.cookie.dmac_notused,
4315 buffer_rcv.cookie.dmac_notused) == QL_SUCCESS) {
4316 bzero((void *)cmd->pm_stat_buf,
4317 cmd->pm_stat_len);
4318 ddi_rep_get8(buffer_rcv.acc_handle,
4319 (uint8_t *)cmd->pm_stat_buf,
4320 (uint8_t *)buffer_rcv.bp,
4321 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4322 rval = FC_SUCCESS;
4323 } else {
4324 EL(ha, "failed, QL_DIAG_LPBDTA FC_FAILURE\n");
4325 rval = FC_FAILURE;
4328 ql_free_phys(ha, &buffer_xmt);
4329 ql_free_phys(ha, &buffer_rcv);
4331 /* Needed to recover the f/w */
4332 set_flags |= ISP_ABORT_NEEDED;
4334 /* Restart IP if it was shutdown. */
4335 if (pha->flags & IP_ENABLED &&
4336 !(pha->flags & IP_INITIALIZED)) {
4337 (void) ql_initialize_ip(pha);
4338 ql_isp_rcvbuf(pha);
4341 break;
4342 case QL_DIAG_ECHO: {
4344 * issue an echo command with a user supplied
4345 * data pattern and destination address
4347 echo_t echo; /* temp echo struct */
4349 /* Setup echo cmd & adjust for platform */
4350 opcode = QL_ECHO_CMD;
4351 BIG_ENDIAN_32(&opcode);
4354 * due to limitations in the ql
4355 * firmaware the echo data field is
4356 * limited to 220
4358 if ((cmd->pm_cmd_len > QL_ECHO_CMD_LENGTH) ||
4359 (cmd->pm_stat_len > QL_ECHO_CMD_LENGTH)) {
4360 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY, "
4361 "cmdl1=%lxh, statl2=%lxh\n",
4362 cmd->pm_cmd_len, cmd->pm_stat_len);
4363 rval = FC_TOOMANY;
4364 break;
4368 * the input data buffer has the user
4369 * supplied data pattern. The "echoed"
4370 * data will be DMAed into the output
4371 * data buffer. Therefore the length
4372 * of the output buffer must be equal
4373 * to or greater then the input buffer
4374 * length
4376 if (cmd->pm_cmd_len > cmd->pm_stat_len) {
4377 EL(ha, "failed, QL_DIAG_ECHO FC_TOOMANY-2,"
4378 " cmdl1=%lxh, statl2=%lxh\n",
4379 cmd->pm_cmd_len, cmd->pm_stat_len);
4380 rval = FC_TOOMANY;
4381 break;
4383 /* add four bytes for the opcode */
4384 echo.transfer_count = (uint32_t)(cmd->pm_cmd_len + 4);
4387 * are we 32 or 64 bit addressed???
4388 * We need to get the appropriate
4389 * DMA and set the command options;
4390 * 64 bit (bit 6) or 32 bit
4391 * (no bit 6) addressing.
4392 * while we are at it lets ask for
4393 * real echo (bit 15)
4395 echo.options = BIT_15;
4396 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) &&
4397 !(CFG_IST(ha, CFG_CTRL_8081))) {
4398 echo.options = (uint16_t)
4399 (echo.options | BIT_6);
4403 * Set up the DMA mappings for the
4404 * output and input data buffers.
4405 * First the output buffer
4407 if (ql_get_dma_mem(ha, &buffer_xmt,
4408 (uint32_t)(cmd->pm_data_len + 4),
4409 LITTLE_ENDIAN_DMA,
4410 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4411 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM\n");
4412 rval = FC_NOMEM;
4413 break;
4415 echo.transfer_data_address = buffer_xmt.cookie;
4417 /* Next the input buffer */
4418 if (ql_get_dma_mem(ha, &buffer_rcv,
4419 (uint32_t)(cmd->pm_data_len + 4),
4420 LITTLE_ENDIAN_DMA,
4421 QL_DMA_DATA_ALIGN) != QL_SUCCESS) {
4423 * since we could not allocate
4424 * DMA space for the input
4425 * buffer we need to clean up
4426 * by freeing the DMA space
4427 * we allocated for the output
4428 * buffer
4430 ql_free_phys(ha, &buffer_xmt);
4431 EL(ha, "failed, QL_DIAG_ECHO FC_NOMEM-2\n");
4432 rval = FC_NOMEM;
4433 break;
4435 echo.receive_data_address = buffer_rcv.cookie;
4438 * copy the 4 byte ECHO op code to the
4439 * allocated DMA space
4441 ddi_rep_put8(buffer_xmt.acc_handle, (uint8_t *)&opcode,
4442 (uint8_t *)buffer_xmt.bp, 4, DDI_DEV_AUTOINCR);
4445 * copy the user supplied data to the
4446 * allocated DMA space
4448 ddi_rep_put8(buffer_xmt.acc_handle,
4449 (uint8_t *)cmd->pm_cmd_buf,
4450 (uint8_t *)buffer_xmt.bp + 4, cmd->pm_cmd_len,
4451 DDI_DEV_AUTOINCR);
4453 /* Shutdown IP. */
4454 if (pha->flags & IP_INITIALIZED) {
4455 (void) ql_shutdown_ip(pha);
4458 /* send the echo */
4459 if (ql_echo(ha, 0, &echo) == QL_SUCCESS) {
4460 ddi_rep_put8(buffer_rcv.acc_handle,
4461 (uint8_t *)buffer_rcv.bp + 4,
4462 (uint8_t *)cmd->pm_stat_buf,
4463 cmd->pm_stat_len, DDI_DEV_AUTOINCR);
4464 } else {
4465 EL(ha, "failed, QL_DIAG_ECHO FC_FAILURE\n");
4466 rval = FC_FAILURE;
4469 /* Restart IP if it was shutdown. */
4470 if (pha->flags & IP_ENABLED &&
4471 !(pha->flags & IP_INITIALIZED)) {
4472 (void) ql_initialize_ip(pha);
4473 ql_isp_rcvbuf(pha);
4475 /* free up our DMA buffers */
4476 ql_free_phys(ha, &buffer_xmt);
4477 ql_free_phys(ha, &buffer_rcv);
4478 break;
4480 default:
4481 EL(ha, "unknown=%xh, FC_PORT_DIAG "
4482 "FC_INVALID_REQUEST\n", cmd->pm_cmd_flags);
4483 rval = FC_INVALID_REQUEST;
4484 break;
4486 PORTMANAGE_UNLOCK(ha);
4487 break;
4488 case FC_PORT_LINK_STATE:
4489 /* Check for name equal to null. */
4490 for (index = 0; index < 8 && index < cmd->pm_cmd_len;
4491 index++) {
4492 if (cmd->pm_cmd_buf[index] != 0) {
4493 break;
4497 /* If name not null. */
4498 if (index < 8 && cmd->pm_cmd_len >= 8) {
4499 /* Locate device queue. */
4500 tq = NULL;
4501 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4502 tq == NULL; index++) {
4503 for (link = ha->dev[index].first; link != NULL;
4504 link = link->next) {
4505 tq = link->base_address;
4507 if (bcmp((void *)&tq->port_name[0],
4508 (void *)cmd->pm_cmd_buf, 8) == 0) {
4509 break;
4510 } else {
4511 tq = NULL;
4516 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id)) {
4517 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4518 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4519 } else {
4520 cnt = FC_PORT_SPEED_MASK(ha->state) |
4521 FC_STATE_OFFLINE;
4522 cmd->pm_stat_buf[0] = (int8_t)LSB(cnt);
4523 cmd->pm_stat_buf[1] = (int8_t)MSB(cnt);
4525 } else {
4526 cmd->pm_stat_buf[0] = (int8_t)LSB(ha->state);
4527 cmd->pm_stat_buf[1] = (int8_t)MSB(ha->state);
4529 break;
4530 case FC_PORT_INITIALIZE:
4531 if (cmd->pm_cmd_len >= 8) {
4532 tq = NULL;
4533 for (index = 0; index < DEVICE_HEAD_LIST_SIZE &&
4534 tq == NULL; index++) {
4535 for (link = ha->dev[index].first; link != NULL;
4536 link = link->next) {
4537 tq = link->base_address;
4539 if (bcmp((void *)&tq->port_name[0],
4540 (void *)cmd->pm_cmd_buf, 8) == 0) {
4541 if (!VALID_DEVICE_ID(ha,
4542 tq->loop_id)) {
4543 tq = NULL;
4545 break;
4546 } else {
4547 tq = NULL;
4552 if (tq == NULL || ql_target_reset(ha, tq,
4553 ha->loop_reset_delay) != QL_SUCCESS) {
4554 EL(ha, "failed, FC_PORT_INITIALIZE "
4555 "FC_FAILURE\n");
4556 rval = FC_FAILURE;
4558 } else {
4559 EL(ha, "failed, FC_PORT_INITIALIZE FC_FAILURE-2, "
4560 "clen=%lxh\n", cmd->pm_cmd_len);
4562 rval = FC_FAILURE;
4564 break;
4565 case FC_PORT_RLS:
4566 if (cmd->pm_data_len < sizeof (fc_rls_acc_t)) {
4567 EL(ha, "failed, buffer size passed: %lxh, "
4568 "req: %lxh\n", cmd->pm_data_len,
4569 (sizeof (fc_rls_acc_t)));
4570 rval = FC_FAILURE;
4571 } else if (LOOP_NOT_READY(pha)) {
4572 EL(ha, "loop NOT ready\n");
4573 bzero(cmd->pm_data_buf, cmd->pm_data_len);
4574 } else if (ql_get_link_status(ha, ha->loop_id,
4575 cmd->pm_data_len, cmd->pm_data_buf, 0) != QL_SUCCESS) {
4576 EL(ha, "failed, FC_PORT_RLS FC_FAILURE\n");
4577 rval = FC_FAILURE;
4578 #ifdef _BIG_ENDIAN
4579 } else {
4580 fc_rls_acc_t *rls;
4582 rls = (fc_rls_acc_t *)cmd->pm_data_buf;
4583 LITTLE_ENDIAN_32(&rls->rls_link_fail);
4584 LITTLE_ENDIAN_32(&rls->rls_sync_loss);
4585 LITTLE_ENDIAN_32(&rls->rls_sig_loss);
4586 LITTLE_ENDIAN_32(&rls->rls_invalid_crc);
4587 #endif /* _BIG_ENDIAN */
4589 break;
4590 case FC_PORT_GET_NODE_ID:
4591 if (ql_get_rnid_params(ha, cmd->pm_data_len,
4592 cmd->pm_data_buf) != QL_SUCCESS) {
4593 EL(ha, "failed, FC_PORT_GET_NODE_ID FC_FAILURE\n");
4594 rval = FC_FAILURE;
4596 break;
4597 case FC_PORT_SET_NODE_ID:
4598 if (ql_set_rnid_params(ha, cmd->pm_data_len,
4599 cmd->pm_data_buf) != QL_SUCCESS) {
4600 EL(ha, "failed, FC_PORT_SET_NODE_ID FC_FAILURE\n");
4601 rval = FC_FAILURE;
4603 break;
4604 case FC_PORT_DOWNLOAD_FCODE:
4605 PORTMANAGE_LOCK(ha);
4606 if ((CFG_IST(ha, CFG_CTRL_24258081)) == 0) {
4607 rval = ql_load_flash(ha, (uint8_t *)cmd->pm_data_buf,
4608 (uint32_t)cmd->pm_data_len);
4609 } else {
4610 if (cmd->pm_data_buf[0] == 4 &&
4611 cmd->pm_data_buf[8] == 0 &&
4612 cmd->pm_data_buf[9] == 0x10 &&
4613 cmd->pm_data_buf[10] == 0 &&
4614 cmd->pm_data_buf[11] == 0) {
4615 rval = ql_24xx_load_flash(ha,
4616 (uint8_t *)cmd->pm_data_buf,
4617 (uint32_t)cmd->pm_data_len,
4618 ha->flash_fw_addr << 2);
4619 } else {
4620 rval = ql_24xx_load_flash(ha,
4621 (uint8_t *)cmd->pm_data_buf,
4622 (uint32_t)cmd->pm_data_len, 0);
4626 if (rval != QL_SUCCESS) {
4627 EL(ha, "failed, FC_PORT_DOWNLOAD_FCODE FC_FAILURE\n");
4628 rval = FC_FAILURE;
4629 } else {
4630 rval = FC_SUCCESS;
4632 ql_reset_chip(ha);
4633 set_flags |= ISP_ABORT_NEEDED;
4634 PORTMANAGE_UNLOCK(ha);
4635 break;
4636 default:
4637 EL(ha, "unknown=%xh, FC_BADCMD\n", cmd->pm_cmd_code);
4638 rval = FC_BADCMD;
4639 break;
4642 /* Wait for suspension to end. */
4643 ql_awaken_task_daemon(ha, NULL, set_flags, DRIVER_STALL);
4644 timer = 0;
4646 while (timer++ < 3000 &&
4647 ha->task_daemon_flags & (QL_LOOP_TRANSITION | DRIVER_STALL)) {
4648 ql_delay(ha, 10000);
4651 ql_restart_queues(ha);
4653 if (rval != FC_SUCCESS) {
4654 EL(ha, "failed, rval = %xh\n", rval);
4655 } else {
4656 /*EMPTY*/
4657 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4660 return (rval);
/*
 * ql_get_device
 *	Maps a 24-bit FC port ID to this FCA's target queue handle.
 *
 * Input:
 *	fca_handle = handle setup by ql_bind_port().
 *	d_id = remote port ID to look up.
 *
 * Returns:
 *	Target queue (ql_tgt_t) pointer as an opaque handle, NULL = failure.
 *
 * Context:
 *	Kernel context.
 */
4663 static opaque_t
4664 ql_get_device(opaque_t fca_handle, fc_portid_t d_id)
4666 port_id_t id;
4667 ql_adapter_state_t *ha;
4668 ql_tgt_t *tq;
/* Normalize the caller's port ID; the reserved byte must be zero. */
4670 id.r.rsvd_1 = 0;
4671 id.b24 = d_id.port_id;
/* Reject handles that were not issued by this FCA instance. */
4673 ha = ql_fca_handle_to_state(fca_handle);
4674 if (ha == NULL) {
4675 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4676 (void *)fca_handle);
4677 return (NULL);
4679 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance, id.b24);
4681 tq = ql_d_id_to_queue(ha, id);
4683 if (tq == NULL) {
4684 EL(ha, "failed, tq=NULL\n");
4685 } else {
4686 /*EMPTY*/
4687 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4689 return (tq);
4692 /* ************************************************************************ */
4693 /* FCA Driver Local Support Functions. */
4694 /* ************************************************************************ */
4697 * ql_cmd_setup
4698 * Verifies proper command.
4700 * Input:
4701 * fca_handle = handle setup by ql_bind_port().
4702 * pkt = pointer to fc_packet.
4703 * rval = pointer for return value.
4705 * Returns:
4706 * Adapter state pointer, NULL = failure.
4708 * Context:
4709 * Kernel context.
4711 static ql_adapter_state_t *
4712 ql_cmd_setup(opaque_t fca_handle, fc_packet_t *pkt, int *rval)
4714 ql_adapter_state_t *ha, *pha;
4715 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
4716 ql_tgt_t *tq;
4717 port_id_t d_id;
/* Start each packet with clean residual counts. */
4719 pkt->pkt_resp_resid = 0;
4720 pkt->pkt_data_resid = 0;
4722 /* check that the handle is assigned by this FCA */
4723 ha = ql_fca_handle_to_state(fca_handle);
4724 if (ha == NULL) {
4725 *rval = FC_UNBOUND;
4726 QL_PRINT_2(CE_CONT, "failed, no adapter=%ph\n",
4727 (void *)fca_handle);
4728 return (NULL);
4730 pha = ha->pha;
4732 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* During a panic/crash dump, skip all further gating checks. */
4734 if (ddi_in_panic() || pkt->pkt_tran_flags & FC_TRAN_DUMPING) {
4735 return (ha);
4738 if (!(pha->flags & ONLINE)) {
4739 pkt->pkt_state = FC_PKT_LOCAL_RJT;
4740 pkt->pkt_reason = FC_REASON_HW_ERROR;
4741 *rval = FC_TRANSPORT_ERROR;
4742 EL(ha, "failed, not online hf=%xh\n", pha->flags);
4743 return (NULL);
4746 /* Exit on loop down. */
4747 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING) &&
4748 pha->task_daemon_flags & LOOP_DOWN &&
4749 pha->loop_down_timer <= pha->loop_down_abort_time) {
4750 pkt->pkt_state = FC_PKT_PORT_OFFLINE;
4751 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4752 *rval = FC_OFFLINE;
4753 EL(ha, "failed, loop down tdf=%xh\n", pha->task_daemon_flags);
4754 return (NULL);
/*
 * For SCSI FCP commands, resolve (and cache) the target queue in the
 * packet, then refuse targets that are mid-RSCN or need re-login.
 */
4757 if (pkt->pkt_cmd_fhdr.r_ctl == R_CTL_COMMAND &&
4758 pkt->pkt_cmd_fhdr.type == FC_TYPE_SCSI_FCP) {
4759 tq = (ql_tgt_t *)pkt->pkt_fca_device;
4760 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id))) {
4761 d_id.r.rsvd_1 = 0;
4762 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4763 tq = ql_d_id_to_queue(ha, d_id);
4765 pkt->pkt_fca_device = (opaque_t)tq;
4768 if (tq != NULL) {
4769 DEVICE_QUEUE_LOCK(tq);
4770 if (tq->flags & (TQF_RSCN_RCVD |
4771 TQF_NEED_AUTHENTICATION)) {
4772 *rval = FC_DEVICE_BUSY;
4773 DEVICE_QUEUE_UNLOCK(tq);
4774 EL(ha, "failed, busy qf=%xh, d_id=%xh\n",
4775 tq->flags, tq->d_id.b24);
4776 return (NULL);
4778 DEVICE_QUEUE_UNLOCK(tq);
4783 * Check DMA pointers.
4785 *rval = DDI_SUCCESS;
4786 if (pkt->pkt_cmd_acc != NULL && pkt->pkt_cmdlen) {
4787 QL_CLEAR_DMA_HANDLE(pkt->pkt_cmd_dma);
4788 *rval = ddi_check_dma_handle(pkt->pkt_cmd_dma);
4789 if (*rval == DDI_SUCCESS) {
4790 *rval = ddi_check_acc_handle(pkt->pkt_cmd_acc);
4794 if (pkt->pkt_resp_acc != NULL && *rval == DDI_SUCCESS &&
4795 pkt->pkt_rsplen != 0) {
4796 QL_CLEAR_DMA_HANDLE(pkt->pkt_resp_dma);
4797 *rval = ddi_check_dma_handle(pkt->pkt_resp_dma);
4798 if (*rval == DDI_SUCCESS) {
4799 *rval = ddi_check_acc_handle(pkt->pkt_resp_acc);
4804 * Minimum branch conditional; Change it with care.
/* Deliberate bitwise '&' of the three conditions; see note above. */
4806 if (((pkt->pkt_data_acc != NULL) & (*rval == DDI_SUCCESS) &
4807 (pkt->pkt_datalen != 0)) != 0) {
4808 QL_CLEAR_DMA_HANDLE(pkt->pkt_data_dma);
4809 *rval = ddi_check_dma_handle(pkt->pkt_data_dma);
4810 if (*rval == DDI_SUCCESS) {
4811 *rval = ddi_check_acc_handle(pkt->pkt_data_acc);
4815 if (*rval != DDI_SUCCESS) {
4816 pkt->pkt_state = FC_PKT_TRAN_ERROR;
4817 pkt->pkt_reason = FC_REASON_DMA_ERROR;
4819 /* Do command callback. */
4820 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
4821 ql_awaken_task_daemon(ha, sp, 0, 0);
4823 *rval = FC_BADPACKET;
4824 EL(ha, "failed, bad DMA pointers\n");
4825 return (NULL);
/* SRB must carry this driver's brand or the packet was not ours. */
4828 if (sp->magic_number != QL_FCA_BRAND) {
4829 *rval = FC_BADPACKET;
4830 EL(ha, "failed, magic number=%xh\n", sp->magic_number);
4831 return (NULL);
4833 *rval = FC_SUCCESS;
4835 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
4837 return (ha);
4841 * ql_els_plogi
4842  *	Issue an extended link service port login request.
4844 * Input:
4845 * ha = adapter state pointer.
4846 * pkt = pointer to fc_packet.
4848 * Returns:
4849 * FC_SUCCESS - the packet was accepted for transport.
4850 * FC_TRANSPORT_ERROR - a transport error occurred.
4852 * Context:
4853 * Kernel context.
4855 static int
4856 ql_els_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
4858 ql_tgt_t *tq = NULL;
4859 port_id_t d_id;
4860 la_els_logi_t acc;
4861 class_svc_param_t *class3_param;
4862 int ret;
4863 int rval = FC_SUCCESS;
4865 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
4866 pkt->pkt_cmd_fhdr.d_id);
/* Bail out early if the adapter state machine is not online. */
4868 TASK_DAEMON_LOCK(ha);
4869 if (!(ha->task_daemon_flags & STATE_ONLINE)) {
4870 TASK_DAEMON_UNLOCK(ha);
4871 QL_PRINT_3(CE_CONT, "(%d): offline done\n", ha->instance);
4872 return (FC_OFFLINE);
4874 TASK_DAEMON_UNLOCK(ha);
4876 bzero(&acc, sizeof (acc));
4877 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
4879 ret = QL_SUCCESS;
4881 if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
4883 * In p2p topology it sends a PLOGI after determining
4884 * it has the N_Port login initiative.
4886 ret = ql_p2p_plogi(ha, pkt);
/* QL_CONSUMED: the IOCB was queued; completion comes via ISR. */
4888 if (ret == QL_CONSUMED) {
4889 return (ret);
4892 switch (ret = ql_login_port(ha, d_id)) {
4893 case QL_SUCCESS:
4894 tq = ql_d_id_to_queue(ha, d_id);
4895 break;
/* Stale handle freed by the first attempt; retry the login once. */
4897 case QL_LOOP_ID_USED:
4898 if ((ret = ql_login_port(ha, d_id)) == QL_SUCCESS) {
4899 tq = ql_d_id_to_queue(ha, d_id);
4901 break;
4903 default:
4904 break;
4907 if (ret != QL_SUCCESS) {
4909 * Invalidate this entry so as to seek a fresh loop ID
4910 * in case firmware reassigns it to something else
4912 tq = ql_d_id_to_queue(ha, d_id);
4913 if (tq && (ret != QL_MEMORY_ALLOC_FAILED)) {
4914 tq->loop_id = PORT_NO_LOOP_ID;
4916 } else if (tq) {
/* Refresh the port database (ADISC) for the logged-in device. */
4917 (void) ql_get_port_database(ha, tq, PDF_ADISC);
4920 if (tq != NULL && VALID_DEVICE_ID(ha, tq->loop_id) &&
4921 (ret != QL_MEMORY_ALLOC_FAILED) && PD_PORT_LOGIN(tq)) {
4923 /* Build ACC. */
4924 acc.ls_code.ls_code = LA_ELS_ACC;
4925 acc.common_service.fcph_version = 0x2006;
4926 acc.common_service.cmn_features = 0x8800;
4927 acc.common_service.rx_bufsize = QL_MAX_FRAME_SIZE(ha);
4928 acc.common_service.conc_sequences = 0xff;
4929 acc.common_service.relative_offset = 0x03;
4930 acc.common_service.e_d_tov = 0x7d0;
4932 bcopy((void *)&tq->port_name[0],
4933 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
4934 bcopy((void *)&tq->node_name[0],
4935 (void *)&acc.node_ww_name.raw_wwn[0], 8);
4937 class3_param = (class_svc_param_t *)&acc.class_3;
4938 class3_param->class_valid_svc_opt = 0x8000;
4939 class3_param->recipient_ctl = tq->class3_recipient_ctl;
4940 class3_param->rcv_data_size = tq->class3_rcv_data_size;
4941 class3_param->conc_sequences = tq->class3_conc_sequences;
4942 class3_param->open_sequences_per_exch =
4943 tq->class3_open_sequences_per_exch;
4945 if ((ql_busy_plogi(ha, pkt, tq) == FC_TRAN_BUSY)) {
4946 acc.ls_code.ls_code = LA_ELS_RJT;
4947 pkt->pkt_state = FC_PKT_TRAN_BSY;
4948 pkt->pkt_reason = FC_REASON_XCHG_BSY;
4949 EL(ha, "LA_ELS_RJT, FC_REASON_XCHG_BSY\n");
4950 rval = FC_TRAN_BUSY;
4951 } else {
4952 DEVICE_QUEUE_LOCK(tq);
4953 tq->logout_sent = 0;
4954 tq->flags &= ~TQF_NEED_AUTHENTICATION;
/* 24xx/25xx/81xx: schedule iIDMA (link-rate) renegotiation. */
4955 if (CFG_IST(ha, CFG_CTRL_242581)) {
4956 tq->flags |= TQF_IIDMA_NEEDED;
4958 DEVICE_QUEUE_UNLOCK(tq);
4960 if (CFG_IST(ha, CFG_CTRL_242581)) {
4961 TASK_DAEMON_LOCK(ha);
4962 ha->task_daemon_flags |= TD_IIDMA_NEEDED;
4963 TASK_DAEMON_UNLOCK(ha);
4966 pkt->pkt_state = FC_PKT_SUCCESS;
4968 } else {
4969 /* Build RJT. */
4970 acc.ls_code.ls_code = LA_ELS_RJT;
/* Map the firmware status to transport packet state/reason. */
4972 switch (ret) {
4973 case QL_FUNCTION_TIMEOUT:
4974 pkt->pkt_state = FC_PKT_TIMEOUT;
4975 pkt->pkt_reason = FC_REASON_HW_ERROR;
4976 break;
4978 case QL_MEMORY_ALLOC_FAILED:
4979 pkt->pkt_state = FC_PKT_LOCAL_BSY;
4980 pkt->pkt_reason = FC_REASON_NOMEM;
4981 rval = FC_TRAN_BUSY;
4982 break;
4984 case QL_FABRIC_NOT_INITIALIZED:
4985 pkt->pkt_state = FC_PKT_FABRIC_BSY;
4986 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4987 rval = FC_TRAN_BUSY;
4988 break;
4990 default:
4991 pkt->pkt_state = FC_PKT_TRAN_ERROR;
4992 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
4993 break;
4996 EL(ha, "Plogi unsuccess for %xh state %xh reason %xh "
4997 "ret %xh rval %xh\n", d_id.b24, pkt->pkt_state,
4998 pkt->pkt_reason, ret, rval);
5001 if (tq != NULL) {
5002 DEVICE_QUEUE_LOCK(tq);
5003 tq->flags &= ~(TQF_PLOGI_PROGRS | TQF_QUEUE_SUSPENDED);
/* On a busy retryable result, require re-login before further I/O. */
5004 if (rval == FC_TRAN_BUSY) {
5005 if (tq->d_id.b24 != BROADCAST_ADDR) {
5006 tq->flags |= TQF_NEED_AUTHENTICATION;
5009 DEVICE_QUEUE_UNLOCK(tq);
/* Copy the ACC/RJT payload into the caller's response buffer. */
5012 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5013 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5015 if (rval != FC_SUCCESS) {
5016 EL(ha, "failed, rval = %xh\n", rval);
5017 } else {
5018 /*EMPTY*/
5019 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5021 return (rval);
5025 * ql_p2p_plogi
5026 * Start an extended link service port login request using
5027 * an ELS Passthru iocb.
5029 * Input:
5030 * ha = adapter state pointer.
5031 * pkt = pointer to fc_packet.
5033 * Returns:
5034  * QL_CONSUMED - the iocb was queued for transport.
5036 * Context:
5037 * Kernel context.
5039 static int
5040 ql_p2p_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5042 uint16_t id;
5043 ql_tgt_t tmp;
/* Scratch target-queue on the stack, used only to probe the firmware
 * port database; it never enters the driver's device lists. */
5044 ql_tgt_t *tq = &tmp;
5045 int rval;
5046 port_id_t d_id;
5047 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5049 tq->d_id.b.al_pa = 0;
5050 tq->d_id.b.area = 0;
5051 tq->d_id.b.domain = 0;
5054 * Verify that the port database hasn't moved beneath our feet by
5055 * switching to the appropriate n_port_handle if necessary. This is
5056 * less unpleasant than the error recovery if the wrong one is used.
5058 for (id = 0; id <= LAST_LOCAL_LOOP_ID; id++) {
5059 tq->loop_id = id;
5060 rval = ql_get_port_database(ha, tq, PDF_NONE);
5061 EL(ha, "rval=%xh\n", rval);
5062 /* check all the ones not logged in for possible use */
5063 if (rval == QL_NOT_LOGGED_IN) {
5064 if (tq->master_state == PD_STATE_PLOGI_PENDING) {
5065 ha->n_port->n_port_handle = tq->loop_id;
5066 EL(ha, "n_port_handle =%xh, master state=%x\n",
5067 tq->loop_id, tq->master_state);
5068 break;
5071 * Use a 'port unavailable' entry only
5072 * if we used it before.
5074 if (tq->master_state == PD_STATE_PORT_UNAVAILABLE) {
5075 /* if the port_id matches, reuse it */
5076 if (pkt->pkt_cmd_fhdr.d_id == tq->d_id.b24) {
5077 EL(ha, "n_port_handle =%xh,"
5078 "master state=%xh\n",
5079 tq->loop_id, tq->master_state);
5080 break;
5081 } else if (tq->loop_id ==
5082 ha->n_port->n_port_handle) {
5083 // avoid a lint error
/* Advance the cached handle past this unusable entry (+2,
 * done via a temporary to keep lint quiet). */
5084 uint16_t *hndl;
5085 uint16_t val;
5087 hndl = &ha->n_port->n_port_handle;
5088 val = *hndl;
5089 val++;
5090 val++;
5091 *hndl = val;
5093 EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5094 "master state=%x\n", rval, id, tq->loop_id,
5095 tq->master_state);
/* An already-logged-in entry belongs to us if the remote
 * port is not an initiator. */
5099 if (rval == QL_SUCCESS) {
5100 if ((tq->flags & TQF_INITIATOR_DEVICE) == 0) {
5101 ha->n_port->n_port_handle = tq->loop_id;
5102 EL(ha, "n_port_handle =%xh, master state=%x\n",
5103 tq->loop_id, tq->master_state);
5104 break;
5106 EL(ha, "rval=%xh, id=%d, n_port_handle =%xh, "
5107 "master state=%x\n", rval, id, tq->loop_id,
5108 tq->master_state);
/* Flush the command buffer to the device and queue the ELS
 * passthru IOCB; the caller must treat the packet as consumed. */
5111 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0, DDI_DMA_SYNC_FORDEV);
5113 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5114 tq = ql_d_id_to_queue(ha, d_id);
5115 ql_timeout_insert(ha, tq, sp);
5116 ql_start_iocb(ha, sp);
5118 return (QL_CONSUMED);
5123 * ql_els_flogi
5124  *	Issue an extended link service fabric login request.
5126 * Input:
5127 * ha = adapter state pointer.
5128 * pkt = pointer to fc_packet.
5130 * Returns:
5131 * FC_SUCCESS - the packet was accepted for transport.
5132 * FC_TRANSPORT_ERROR - a transport error occurred.
5134 * Context:
5135 * Kernel context.
5137 static int
5138 ql_els_flogi(ql_adapter_state_t *ha, fc_packet_t *pkt)
5140 ql_tgt_t *tq = NULL;
5141 port_id_t d_id;
5142 la_els_logi_t acc;
5143 class_svc_param_t *class3_param;
5144 int rval = FC_SUCCESS;
/* Set when an N_Port has no database entry but we accept anyway. */
5145 int accept = 0;
5147 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5148 pkt->pkt_cmd_fhdr.d_id);
5150 bzero(&acc, sizeof (acc));
5151 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5153 if (CFG_IST(ha, CFG_CTRL_2425) && ha->topology & QL_N_PORT) {
5155 * d_id of zero in a FLOGI accept response in a point to point
5156 * topology triggers evaluation of N Port login initiative.
5158 pkt->pkt_resp_fhdr.d_id = 0;
5160 * An N_Port already logged in with the firmware
5161 * will have the only database entry.
5163 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
5164 tq = ql_loop_id_to_queue(ha, ha->n_port->n_port_handle);
5167 if (tq != NULL) {
5169 * If the target port has initiative send
5170 * up a PLOGI about the new device.
5172 if ((ql_wwn_cmp(ha, (la_wwn_t *)&tq->port_name[0],
5173 (la_wwn_t *)(CFG_IST(ha, CFG_CTRL_2425) ?
5174 &ha->init_ctrl_blk.cb24.port_name[0] :
5175 &ha->init_ctrl_blk.cb.port_name[0])) == 1)) {
5176 ha->send_plogi_timer = 3;
5177 } else {
5178 ha->send_plogi_timer = 0;
5180 pkt->pkt_resp_fhdr.s_id = tq->d_id.b24;
5181 } else {
5183 * An N_Port not logged in with the firmware will not
5184 * have a database entry. We accept anyway and rely
5185 * on a PLOGI from the upper layers to set the d_id
5186 * and s_id.
5188 accept = 1;
5190 } else {
5191 tq = ql_d_id_to_queue(ha, d_id);
5193 if ((tq != NULL) || (accept != 0)) {
5194 /* Build ACC. */
5195 pkt->pkt_state = FC_PKT_SUCCESS;
5196 class3_param = (class_svc_param_t *)&acc.class_3;
5198 acc.ls_code.ls_code = LA_ELS_ACC;
5199 acc.common_service.fcph_version = 0x2006;
5200 if (ha->topology & QL_N_PORT) {
5201 /* clear F_Port indicator */
5202 acc.common_service.cmn_features = 0x0800;
5203 } else {
5204 acc.common_service.cmn_features = 0x1b00;
/* rx_bufsize comes from the init control block matching the chip. */
5206 CFG_IST(ha, CFG_CTRL_24258081) ?
5207 (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5208 ha->init_ctrl_blk.cb24.max_frame_length[0],
5209 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
5210 (acc.common_service.rx_bufsize = CHAR_TO_SHORT(
5211 ha->init_ctrl_blk.cb.max_frame_length[0],
5212 ha->init_ctrl_blk.cb.max_frame_length[1]));
5213 acc.common_service.conc_sequences = 0xff;
5214 acc.common_service.relative_offset = 0x03;
5215 acc.common_service.e_d_tov = 0x7d0;
5216 if (accept) {
5217 /* Use the saved N_Port WWNN and WWPN */
5218 if (ha->n_port != NULL) {
5219 bcopy((void *)&ha->n_port->port_name[0],
5220 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5221 bcopy((void *)&ha->n_port->node_name[0],
5222 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5223 /* mark service options invalid */
5224 class3_param->class_valid_svc_opt = 0x0800;
5225 } else {
5226 EL(ha, "ha->n_port is NULL\n");
5227 /* Build RJT. */
5228 acc.ls_code.ls_code = LA_ELS_RJT;
5230 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5231 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5233 } else {
5234 bcopy((void *)&tq->port_name[0],
5235 (void *)&acc.nport_ww_name.raw_wwn[0], 8);
5236 bcopy((void *)&tq->node_name[0],
5237 (void *)&acc.node_ww_name.raw_wwn[0], 8);
5239 class3_param = (class_svc_param_t *)&acc.class_3;
5240 class3_param->class_valid_svc_opt = 0x8800;
5241 class3_param->recipient_ctl = tq->class3_recipient_ctl;
5242 class3_param->rcv_data_size = tq->class3_rcv_data_size;
5243 class3_param->conc_sequences =
5244 tq->class3_conc_sequences;
5245 class3_param->open_sequences_per_exch =
5246 tq->class3_open_sequences_per_exch;
5248 } else {
5249 /* Build RJT. */
5250 acc.ls_code.ls_code = LA_ELS_RJT;
5252 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5253 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5254 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
/* Copy the ACC/RJT payload into the caller's response buffer. */
5257 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5258 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5260 if (rval != FC_SUCCESS) {
5261 EL(ha, "failed, rval = %xh\n", rval);
5262 } else {
5263 /*EMPTY*/
5264 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5266 return (rval);
5270 * ql_els_logo
5271  *	Issue an extended link service logout request.
5273 * Input:
5274 * ha = adapter state pointer.
5275 * pkt = pointer to fc_packet.
5277 * Returns:
5278 * FC_SUCCESS - the packet was accepted for transport.
5279 * FC_TRANSPORT_ERROR - a transport error occurred.
5281 * Context:
5282 * Kernel context.
5284 static int
5285 ql_els_logo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5287 port_id_t d_id;
5288 ql_tgt_t *tq;
5289 la_els_logo_t acc;
5290 int rval = FC_SUCCESS;
5292 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5293 pkt->pkt_cmd_fhdr.d_id);
5295 bzero(&acc, sizeof (acc));
5296 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5298 tq = ql_d_id_to_queue(ha, d_id);
5299 if (tq) {
5300 DEVICE_QUEUE_LOCK(tq);
/* A logout to the broadcast address is a silent no-op. */
5301 if (tq->d_id.b24 == BROADCAST_ADDR) {
5302 DEVICE_QUEUE_UNLOCK(tq);
5303 return (FC_SUCCESS);
/* Block new I/O until the device re-authenticates (PLOGI). */
5306 tq->flags |= TQF_NEED_AUTHENTICATION;
5308 do {
5309 DEVICE_QUEUE_UNLOCK(tq);
5310 (void) ql_abort_device(ha, tq, 1);
5313 * Wait for commands to drain in F/W (doesn't
5314 * take more than a few milliseconds)
5316 ql_delay(ha, 10000);
5318 DEVICE_QUEUE_LOCK(tq);
5319 } while (tq->outcnt);
5321 DEVICE_QUEUE_UNLOCK(tq);
5324 if (ql_logout_port(ha, d_id) == QL_SUCCESS) {
5325 /* Build ACC. */
5326 acc.ls_code.ls_code = LA_ELS_ACC;
5328 pkt->pkt_state = FC_PKT_SUCCESS;
5329 } else {
5330 /* Build RJT. */
5331 acc.ls_code.ls_code = LA_ELS_RJT;
5333 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5334 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5335 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
/* Copy the ACC/RJT payload into the caller's response buffer. */
5338 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5339 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5341 if (rval != FC_SUCCESS) {
5342 EL(ha, "failed, rval = %xh\n", rval);
5343 } else {
5344 /*EMPTY*/
5345 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5347 return (rval);
5351 * ql_els_prli
5352  *	Issue an extended link service process login request.
5354 * Input:
5355 * ha = adapter state pointer.
5356 * pkt = pointer to fc_packet.
5358 * Returns:
5359 * FC_SUCCESS - the packet was accepted for transport.
5360 * FC_TRANSPORT_ERROR - a transport error occurred.
5362 * Context:
5363 * Kernel context.
5365 static int
5366 ql_els_prli(ql_adapter_state_t *ha, fc_packet_t *pkt)
5368 ql_tgt_t *tq;
5369 port_id_t d_id;
5370 la_els_prli_t acc;
5371 prli_svc_param_t *param;
5372 ql_srb_t *sp = (ql_srb_t *)pkt->pkt_fca_private;
5373 int rval = FC_SUCCESS;
5375 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5376 pkt->pkt_cmd_fhdr.d_id);
5378 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5380 tq = ql_d_id_to_queue(ha, d_id);
5381 if (tq != NULL) {
/* Refresh the firmware port database before deciding how to reply. */
5382 (void) ql_get_port_database(ha, tq, PDF_NONE);
/*
 * In N_Port (p2p) topology with PLOGI complete, forward the PRLI
 * on the wire as an IOCB instead of answering locally.
 */
5384 if ((ha->topology & QL_N_PORT) &&
5385 (tq->master_state == PD_STATE_PLOGI_COMPLETED)) {
5386 ql_timeout_insert(ha, tq, sp);
5387 ql_start_iocb(ha, sp);
5388 rval = QL_CONSUMED;
5389 } else {
5390 /* Build ACC. */
5391 bzero(&acc, sizeof (acc));
5392 acc.ls_code = LA_ELS_ACC;
5393 acc.page_length = 0x10;
5394 acc.payload_length = tq->prli_payload_length;
/* Service parameter page: type 0x08 = SCSI FCP. */
5396 param = (prli_svc_param_t *)&acc.service_params[0];
5397 param->type = 0x08;
5398 param->rsvd = 0x00;
5399 param->process_assoc_flags = tq->prli_svc_param_word_0;
5400 param->process_flags = tq->prli_svc_param_word_3;
5402 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5403 (uint8_t *)pkt->pkt_resp, sizeof (acc),
5404 DDI_DEV_AUTOINCR);
5406 pkt->pkt_state = FC_PKT_SUCCESS;
5408 } else {
5409 la_els_rjt_t rjt;
5411 /* Build RJT. */
5412 bzero(&rjt, sizeof (rjt));
5413 rjt.ls_code.ls_code = LA_ELS_RJT;
5415 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5416 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5418 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5419 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5420 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5423 if ((rval != FC_SUCCESS) && (rval != QL_CONSUMED)) {
5424 EL(ha, "failed, rval = %xh\n", rval);
5425 } else {
5426 /*EMPTY*/
5427 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5429 return (rval);
5433 * ql_els_prlo
5434  *	Issue an extended link service process logout request.
5436 * Input:
5437 * ha = adapter state pointer.
5438 * pkt = pointer to fc_packet.
5440 * Returns:
5441 * FC_SUCCESS - the packet was accepted for transport.
5442 * FC_TRANSPORT_ERROR - a transport error occurred.
5444 * Context:
5445 * Kernel context.
5447 /* ARGSUSED */
5448 static int
5449 ql_els_prlo(ql_adapter_state_t *ha, fc_packet_t *pkt)
5451 la_els_prli_t acc;
5452 int rval = FC_SUCCESS;
5454 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
5455 pkt->pkt_cmd_fhdr.d_id);
5457 /* Build ACC. */
/* Echo the PRLO request payload back, turned into an accept. */
5458 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&acc,
5459 (uint8_t *)pkt->pkt_cmd, sizeof (acc), DDI_DEV_AUTOINCR);
5461 acc.ls_code = LA_ELS_ACC;
5462 acc.service_params[2] = 1;
5464 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5465 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5467 pkt->pkt_state = FC_PKT_SUCCESS;
/* rval is never changed above, so this branch only traces success. */
5469 if (rval != FC_SUCCESS) {
5470 EL(ha, "failed, rval = %xh\n", rval);
5471 } else {
5472 /*EMPTY*/
5473 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5475 return (rval);
5479 * ql_els_adisc
5480  *	Issue an extended link service address discovery request.
5482 * Input:
5483 * ha = adapter state pointer.
5484 * pkt = pointer to fc_packet.
5486 * Returns:
5487 * FC_SUCCESS - the packet was accepted for transport.
5488 * FC_TRANSPORT_ERROR - a transport error occurred.
5490 * Context:
5491 * Kernel context.
5493 static int
5494 ql_els_adisc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5496 ql_dev_id_list_t *list;
5497 uint32_t list_size;
5498 ql_link_t *link;
5499 ql_tgt_t *tq;
5500 ql_lun_t *lq;
5501 port_id_t d_id;
5502 la_els_adisc_t acc;
5503 uint16_t index, loop_id;
5504 ql_mbx_data_t mr;
5505 int rval = FC_SUCCESS;
5507 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5509 bzero(&acc, sizeof (acc));
5510 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5513 * MBC_GET_PORT_DATABASE causes ADISC to go out to
5514 * the device from the firmware
/* Find the target queue for this d_id via the AL_PA hash bucket. */
5516 index = ql_alpa_to_index[d_id.b.al_pa];
5517 tq = NULL;
5518 for (link = ha->dev[index].first; link != NULL; link = link->next) {
5519 tq = link->base_address;
5520 if (tq->d_id.b24 == d_id.b24) {
5521 break;
5522 } else {
5523 tq = NULL;
/*
 * If the device is known but its loop ID is stale, recover it by
 * scanning the firmware's device ID list for a matching d_id.
 */
5527 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5528 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
5529 list = (ql_dev_id_list_t *)kmem_zalloc(list_size, KM_SLEEP);
5531 if (list != NULL &&
5532 ql_get_id_list(ha, (caddr_t)list, list_size, &mr) ==
5533 QL_SUCCESS) {
5535 for (index = 0; index < mr.mb[1]; index++) {
5536 ql_dev_list(ha, list, index, &d_id, &loop_id);
5538 if (tq->d_id.b24 == d_id.b24) {
5539 tq->loop_id = loop_id;
5540 break;
5543 } else {
5544 cmn_err(CE_WARN, "!%s(%d) didn't get list for %xh",
5545 QL_NAME, ha->instance, d_id.b24);
5546 tq = NULL;
5548 if ((tq != NULL) && (!VALID_DEVICE_ID(ha, tq->loop_id))) {
5549 cmn_err(CE_WARN, "!%s(%d) no loop_id for adisc %xh",
5550 QL_NAME, ha->instance, tq->d_id.b24);
5551 tq = NULL;
5554 if (list != NULL) {
5555 kmem_free(list, list_size);
5559 if ((tq != NULL) && (VALID_DEVICE_ID(ha, tq->loop_id)) &&
5560 ql_get_port_database(ha, tq, PDF_ADISC) == QL_SUCCESS) {
5562 /* Build ACC. */
5564 DEVICE_QUEUE_LOCK(tq);
5565 tq->flags &= ~TQF_NEED_AUTHENTICATION;
/* Retry-capable device: restart any LUN queues with pending cmds. */
5566 if (tq->prli_svc_param_word_3 & PRLI_W3_RETRY) {
5567 for (link = tq->lun_queues.first; link != NULL;
5568 link = link->next) {
5569 lq = link->base_address;
5571 if (lq->cmd.first != NULL) {
5572 ql_next(ha, lq);
5573 DEVICE_QUEUE_LOCK(tq);
5577 DEVICE_QUEUE_UNLOCK(tq);
5579 acc.ls_code.ls_code = LA_ELS_ACC;
5580 acc.hard_addr.hard_addr = tq->hard_addr.b24;
5582 bcopy((void *)&tq->port_name[0],
5583 (void *)&acc.port_wwn.raw_wwn[0], 8);
5584 bcopy((void *)&tq->node_name[0],
5585 (void *)&acc.node_wwn.raw_wwn[0], 8);
5587 acc.nport_id.port_id = tq->d_id.b24;
5589 pkt->pkt_state = FC_PKT_SUCCESS;
5590 } else {
5591 /* Build RJT. */
5592 acc.ls_code.ls_code = LA_ELS_RJT;
5594 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5595 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5596 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
/* Copy the ACC/RJT payload into the caller's response buffer. */
5599 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5600 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5602 if (rval != FC_SUCCESS) {
5603 EL(ha, "failed, rval = %xh\n", rval);
5604 } else {
5605 /*EMPTY*/
5606 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5608 return (rval);
5612 * ql_els_linit
5613  *	Issue an extended link service loop initialize request.
5615 * Input:
5616 * ha = adapter state pointer.
5617 * pkt = pointer to fc_packet.
5619 * Returns:
5620 * FC_SUCCESS - the packet was accepted for transport.
5621 * FC_TRANSPORT_ERROR - a transport error occurred.
5623 * Context:
5624 * Kernel context.
5626 static int
5627 ql_els_linit(ql_adapter_state_t *ha, fc_packet_t *pkt)
5629 ddi_dma_cookie_t *cp;
5630 uint32_t cnt;
5631 conv_num_t n;
5632 port_id_t d_id;
5633 int rval = FC_SUCCESS;
5635 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5637 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
/* LINIT is only meaningful on a fabric (SNS) connection. */
5638 if (ha->topology & QL_SNS_CONNECTION) {
5639 fc_linit_req_t els;
5640 lfa_cmd_t lfa;
5642 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5643 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5645 /* Setup LFA mailbox command data. */
5646 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5648 lfa.resp_buffer_length[0] = 4;
/* Response DMA address: 64-bit cookie when supported, else split
 * the low/high 32-bit halves, all in little-endian byte order. */
5650 cp = pkt->pkt_resp_cookie;
5651 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5652 n.size64 = (uint64_t)cp->dmac_laddress;
5653 LITTLE_ENDIAN_64(&n.size64);
5654 } else {
5655 n.size32[0] = LSD(cp->dmac_laddress);
5656 LITTLE_ENDIAN_32(&n.size32[0]);
5657 n.size32[1] = MSD(cp->dmac_laddress);
5658 LITTLE_ENDIAN_32(&n.size32[1]);
5661 /* Set buffer address. */
5662 for (cnt = 0; cnt < 8; cnt++) {
5663 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5666 lfa.subcommand_length[0] = 4;
/* Destination port ID, little-endian, 3 significant bytes. */
5667 n.size32[0] = d_id.b24;
5668 LITTLE_ENDIAN_32(&n.size32[0]);
5669 lfa.addr[0] = n.size8[0];
5670 lfa.addr[1] = n.size8[1];
5671 lfa.addr[2] = n.size8[2];
/* 0x70 = LINIT LFA subcommand. */
5672 lfa.subcommand[1] = 0x70;
5673 lfa.payload[2] = els.func;
5674 lfa.payload[4] = els.lip_b3;
5675 lfa.payload[5] = els.lip_b4;
5677 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5678 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5679 } else {
5680 pkt->pkt_state = FC_PKT_SUCCESS;
5682 } else {
5683 fc_linit_resp_t rjt;
5685 /* Build RJT. */
5686 bzero(&rjt, sizeof (rjt));
5687 rjt.ls_code.ls_code = LA_ELS_RJT;
5689 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5690 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5692 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5693 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5694 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5697 if (rval != FC_SUCCESS) {
5698 EL(ha, "failed, rval = %xh\n", rval);
5699 } else {
5700 /*EMPTY*/
5701 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5703 return (rval);
5707 * ql_els_lpc
5708  *	Issue an extended link service loop control request.
5710 * Input:
5711 * ha = adapter state pointer.
5712 * pkt = pointer to fc_packet.
5714 * Returns:
5715 * FC_SUCCESS - the packet was accepted for transport.
5716 * FC_TRANSPORT_ERROR - a transport error occurred.
5718 * Context:
5719 * Kernel context.
5721 static int
5722 ql_els_lpc(ql_adapter_state_t *ha, fc_packet_t *pkt)
5724 ddi_dma_cookie_t *cp;
5725 uint32_t cnt;
5726 conv_num_t n;
5727 port_id_t d_id;
5728 int rval = FC_SUCCESS;
5730 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5732 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
/* LPC is only meaningful on a fabric (SNS) connection. */
5733 if (ha->topology & QL_SNS_CONNECTION) {
5734 ql_lpc_t els;
5735 lfa_cmd_t lfa;
5737 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5738 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5740 /* Setup LFA mailbox command data. */
5741 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5743 lfa.resp_buffer_length[0] = 4;
/* Response DMA address: 64-bit cookie when supported, else the
 * 32-bit address with a zero upper half, little-endian. */
5745 cp = pkt->pkt_resp_cookie;
5746 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5747 n.size64 = (uint64_t)(cp->dmac_laddress);
5748 LITTLE_ENDIAN_64(&n.size64);
5749 } else {
5750 n.size32[0] = cp->dmac_address;
5751 LITTLE_ENDIAN_32(&n.size32[0]);
5752 n.size32[1] = 0;
5755 /* Set buffer address. */
5756 for (cnt = 0; cnt < 8; cnt++) {
5757 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5760 lfa.subcommand_length[0] = 20;
/* Destination port ID, little-endian, 3 significant bytes. */
5761 n.size32[0] = d_id.b24;
5762 LITTLE_ENDIAN_32(&n.size32[0]);
5763 lfa.addr[0] = n.size8[0];
5764 lfa.addr[1] = n.size8[1];
5765 lfa.addr[2] = n.size8[2];
/* 0x71 = LPC LFA subcommand. */
5766 lfa.subcommand[1] = 0x71;
5767 lfa.payload[4] = els.port_control;
5768 bcopy((void *)&els.lpb[0], (void *)&lfa.payload[6], 32);
5770 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5771 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5772 } else {
5773 pkt->pkt_state = FC_PKT_SUCCESS;
5775 } else {
5776 ql_lpc_resp_t rjt;
5778 /* Build RJT. */
5779 bzero(&rjt, sizeof (rjt));
5780 rjt.ls_code.ls_code = LA_ELS_RJT;
5782 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5783 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5785 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5786 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5787 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5790 if (rval != FC_SUCCESS) {
5791 EL(ha, "failed, rval = %xh\n", rval);
5792 } else {
5793 /*EMPTY*/
5794 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5796 return (rval);
5800 * ql_els_lsts
5801 * Issue a extended link service loop status request.
5803 * Input:
5804 * ha = adapter state pointer.
5805 * pkt = pointer to fc_packet.
5807 * Returns:
5808 * FC_SUCCESS - the packet was accepted for transport.
5809 * FC_TRANSPORT_ERROR - a transport error occurred.
5811 * Context:
5812 * Kernel context.
5814 static int
5815 ql_els_lsts(ql_adapter_state_t *ha, fc_packet_t *pkt)
5817 ddi_dma_cookie_t *cp;
5818 uint32_t cnt;
5819 conv_num_t n;
5820 port_id_t d_id;
5821 int rval = FC_SUCCESS;
5823 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5825 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
5826 if (ha->topology & QL_SNS_CONNECTION) {
5827 fc_lsts_req_t els;
5828 lfa_cmd_t lfa;
5830 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5831 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5833 /* Setup LFA mailbox command data. */
5834 bzero((void *)&lfa, sizeof (lfa_cmd_t));
5836 lfa.resp_buffer_length[0] = 84;
5838 cp = pkt->pkt_resp_cookie;
5839 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
5840 n.size64 = cp->dmac_laddress;
5841 LITTLE_ENDIAN_64(&n.size64);
5842 } else {
5843 n.size32[0] = cp->dmac_address;
5844 LITTLE_ENDIAN_32(&n.size32[0]);
5845 n.size32[1] = 0;
5848 /* Set buffer address. */
5849 for (cnt = 0; cnt < 8; cnt++) {
5850 lfa.resp_buffer_address[cnt] = n.size8[cnt];
5853 lfa.subcommand_length[0] = 2;
5854 n.size32[0] = d_id.b24;
5855 LITTLE_ENDIAN_32(&n.size32[0]);
5856 lfa.addr[0] = n.size8[0];
5857 lfa.addr[1] = n.size8[1];
5858 lfa.addr[2] = n.size8[2];
5859 lfa.subcommand[1] = 0x72;
5861 if (ql_send_lfa(ha, &lfa) != QL_SUCCESS) {
5862 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5863 } else {
5864 pkt->pkt_state = FC_PKT_SUCCESS;
5866 } else {
5867 fc_lsts_resp_t rjt;
5869 /* Build RJT. */
5870 bzero(&rjt, sizeof (rjt));
5871 rjt.lsts_ls_code.ls_code = LA_ELS_RJT;
5873 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
5874 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
5876 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5877 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5878 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5881 if (rval != FC_SUCCESS) {
5882 EL(ha, "failed=%xh\n", rval);
5883 } else {
5884 /*EMPTY*/
5885 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5887 return (rval);
5891 * ql_els_scr
5892 * Issue a extended link service state change registration request.
5894 * Input:
5895 * ha = adapter state pointer.
5896 * pkt = pointer to fc_packet.
5898 * Returns:
5899 * FC_SUCCESS - the packet was accepted for transport.
5900 * FC_TRANSPORT_ERROR - a transport error occurred.
5902 * Context:
5903 * Kernel context.
5905 static int
5906 ql_els_scr(ql_adapter_state_t *ha, fc_packet_t *pkt)
5908 fc_scr_resp_t acc;
5909 int rval = FC_SUCCESS;
5911 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5913 bzero(&acc, sizeof (acc));
5914 if (ha->topology & QL_SNS_CONNECTION) {
5915 fc_scr_req_t els;
5917 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
5918 (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);
5920 if (ql_send_change_request(ha, els.scr_func) ==
5921 QL_SUCCESS) {
5922 /* Build ACC. */
5923 acc.scr_acc = LA_ELS_ACC;
5925 pkt->pkt_state = FC_PKT_SUCCESS;
5926 } else {
5927 /* Build RJT. */
5928 acc.scr_acc = LA_ELS_RJT;
5930 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5931 pkt->pkt_reason = FC_REASON_HW_ERROR;
5932 EL(ha, "LA_ELS_RJT, FC_REASON_HW_ERROR\n");
5934 } else {
5935 /* Build RJT. */
5936 acc.scr_acc = LA_ELS_RJT;
5938 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5939 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5940 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5943 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5944 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5946 if (rval != FC_SUCCESS) {
5947 EL(ha, "failed, rval = %xh\n", rval);
5948 } else {
5949 /*EMPTY*/
5950 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
5952 return (rval);
5956 * ql_els_rscn
5957 * Issue a extended link service register state
5958 * change notification request.
5960 * Input:
5961 * ha = adapter state pointer.
5962 * pkt = pointer to fc_packet.
5964 * Returns:
5965 * FC_SUCCESS - the packet was accepted for transport.
5966 * FC_TRANSPORT_ERROR - a transport error occurred.
5968 * Context:
5969 * Kernel context.
5971 static int
5972 ql_els_rscn(ql_adapter_state_t *ha, fc_packet_t *pkt)
5974 ql_rscn_resp_t acc;
5975 int rval = FC_SUCCESS;
5977 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
5979 bzero(&acc, sizeof (acc));
5980 if (ha->topology & QL_SNS_CONNECTION) {
5981 /* Build ACC. */
5982 acc.scr_acc = LA_ELS_ACC;
5984 pkt->pkt_state = FC_PKT_SUCCESS;
5985 } else {
5986 /* Build RJT. */
5987 acc.scr_acc = LA_ELS_RJT;
5989 pkt->pkt_state = FC_PKT_TRAN_ERROR;
5990 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
5991 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
5994 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
5995 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
5997 if (rval != FC_SUCCESS) {
5998 EL(ha, "failed, rval = %xh\n", rval);
5999 } else {
6000 /*EMPTY*/
6001 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6003 return (rval);
6007 * ql_els_farp_req
6008 * Issue FC Address Resolution Protocol (FARP)
6009 * extended link service request.
6011 * Note: not supported.
6013 * Input:
6014 * ha = adapter state pointer.
6015 * pkt = pointer to fc_packet.
6017 * Returns:
6018 * FC_SUCCESS - the packet was accepted for transport.
6019 * FC_TRANSPORT_ERROR - a transport error occurred.
6021 * Context:
6022 * Kernel context.
6024 static int
6025 ql_els_farp_req(ql_adapter_state_t *ha, fc_packet_t *pkt)
6027 ql_acc_rjt_t acc;
6028 int rval = FC_SUCCESS;
6030 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6032 bzero(&acc, sizeof (acc));
6034 /* Build ACC. */
6035 acc.ls_code.ls_code = LA_ELS_ACC;
6037 pkt->pkt_state = FC_PKT_SUCCESS;
6039 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6040 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6042 if (rval != FC_SUCCESS) {
6043 EL(ha, "failed, rval = %xh\n", rval);
6044 } else {
6045 /*EMPTY*/
6046 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6048 return (rval);
6052 * ql_els_farp_reply
6053 * Issue FC Address Resolution Protocol (FARP)
6054 * extended link service reply.
6056 * Note: not supported.
6058 * Input:
6059 * ha = adapter state pointer.
6060 * pkt = pointer to fc_packet.
6062 * Returns:
6063 * FC_SUCCESS - the packet was accepted for transport.
6064 * FC_TRANSPORT_ERROR - a transport error occurred.
6066 * Context:
6067 * Kernel context.
6069 /* ARGSUSED */
6070 static int
6071 ql_els_farp_reply(ql_adapter_state_t *ha, fc_packet_t *pkt)
6073 ql_acc_rjt_t acc;
6074 int rval = FC_SUCCESS;
6076 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6078 bzero(&acc, sizeof (acc));
6080 /* Build ACC. */
6081 acc.ls_code.ls_code = LA_ELS_ACC;
6083 pkt->pkt_state = FC_PKT_SUCCESS;
6085 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6086 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6088 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6090 return (rval);
6093 static int
6094 ql_els_rnid(ql_adapter_state_t *ha, fc_packet_t *pkt)
6096 uchar_t *rnid_acc;
6097 port_id_t d_id;
6098 ql_link_t *link;
6099 ql_tgt_t *tq;
6100 uint16_t index;
6101 la_els_rnid_acc_t acc;
6102 la_els_rnid_t *req;
6103 size_t req_len;
6105 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6107 req_len = FCIO_RNID_MAX_DATA_LEN + sizeof (fc_rnid_hdr_t);
6108 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6109 index = ql_alpa_to_index[d_id.b.al_pa];
6111 tq = NULL;
6112 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6113 tq = link->base_address;
6114 if (tq->d_id.b24 == d_id.b24) {
6115 break;
6116 } else {
6117 tq = NULL;
6121 /* Allocate memory for rnid status block */
6122 rnid_acc = kmem_zalloc(req_len, KM_SLEEP);
6124 bzero(&acc, sizeof (acc));
6126 req = (la_els_rnid_t *)pkt->pkt_cmd;
6127 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6128 (ql_send_rnid_els(ha, tq->loop_id, req->data_format, req_len,
6129 (caddr_t)rnid_acc) != QL_SUCCESS)) {
6131 kmem_free(rnid_acc, req_len);
6132 acc.ls_code.ls_code = LA_ELS_RJT;
6134 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6135 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6137 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6138 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6139 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6141 return (FC_FAILURE);
6144 acc.ls_code.ls_code = LA_ELS_ACC;
6145 bcopy(rnid_acc, &acc.hdr, req_len);
6146 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6147 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6149 kmem_free(rnid_acc, req_len);
6150 pkt->pkt_state = FC_PKT_SUCCESS;
6152 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6154 return (FC_SUCCESS);
6157 static int
6158 ql_els_rls(ql_adapter_state_t *ha, fc_packet_t *pkt)
6160 fc_rls_acc_t *rls_acc;
6161 port_id_t d_id;
6162 ql_link_t *link;
6163 ql_tgt_t *tq;
6164 uint16_t index;
6165 la_els_rls_acc_t acc;
6167 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6169 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6170 index = ql_alpa_to_index[d_id.b.al_pa];
6172 tq = NULL;
6173 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6174 tq = link->base_address;
6175 if (tq->d_id.b24 == d_id.b24) {
6176 break;
6177 } else {
6178 tq = NULL;
6182 /* Allocate memory for link error status block */
6183 rls_acc = kmem_zalloc(sizeof (*rls_acc), KM_SLEEP);
6185 bzero(&acc, sizeof (la_els_rls_acc_t));
6187 if ((tq == NULL) || (!VALID_DEVICE_ID(ha, tq->loop_id)) ||
6188 (ql_get_link_status(ha, tq->loop_id, sizeof (*rls_acc),
6189 (caddr_t)rls_acc, 0) != QL_SUCCESS)) {
6191 kmem_free(rls_acc, sizeof (*rls_acc));
6192 acc.ls_code.ls_code = LA_ELS_RJT;
6194 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6195 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6197 pkt->pkt_state = FC_PKT_TRAN_ERROR;
6198 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
6199 EL(ha, "LA_ELS_RJT, FC_REASON_NO_CONNECTION\n");
6201 return (FC_FAILURE);
6204 LITTLE_ENDIAN_32(&rls_acc->rls_link_fail);
6205 LITTLE_ENDIAN_32(&rls_acc->rls_sync_loss);
6206 LITTLE_ENDIAN_32(&rls_acc->rls_sig_loss);
6207 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_word);
6208 LITTLE_ENDIAN_32(&rls_acc->rls_invalid_crc);
6210 acc.ls_code.ls_code = LA_ELS_ACC;
6211 acc.rls_link_params.rls_link_fail = rls_acc->rls_link_fail;
6212 acc.rls_link_params.rls_sync_loss = rls_acc->rls_sync_loss;
6213 acc.rls_link_params.rls_sig_loss = rls_acc->rls_sig_loss;
6214 acc.rls_link_params.rls_invalid_word = rls_acc->rls_invalid_word;
6215 acc.rls_link_params.rls_invalid_crc = rls_acc->rls_invalid_crc;
6216 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&acc,
6217 (uint8_t *)pkt->pkt_resp, sizeof (acc), DDI_DEV_AUTOINCR);
6219 kmem_free(rls_acc, sizeof (*rls_acc));
6220 pkt->pkt_state = FC_PKT_SUCCESS;
6222 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6224 return (FC_SUCCESS);
6227 static int
6228 ql_busy_plogi(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_tgt_t *tq)
6230 port_id_t d_id;
6231 ql_srb_t *sp;
6232 fc_unsol_buf_t *ubp;
6233 ql_link_t *link, *next_link;
6234 int rval = FC_SUCCESS;
6235 int cnt = 5;
6237 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6240 * we need to ensure that q->outcnt == 0, otherwise
6241 * any cmd completed with PKT_PORT_OFFLINE after PLOGI
6242 * will confuse ulps.
6245 DEVICE_QUEUE_LOCK(tq);
6246 do {
6248 * wait for the cmds to get drained. If they
6249 * don't get drained then the transport will
6250 * retry PLOGI after few secs.
6252 if (tq->outcnt != 0) {
6253 rval = FC_TRAN_BUSY;
6254 DEVICE_QUEUE_UNLOCK(tq);
6255 ql_delay(ha, 10000);
6256 DEVICE_QUEUE_LOCK(tq);
6257 cnt--;
6258 if (!cnt) {
6259 cmn_err(CE_NOTE, "!%s(%d) Plogi busy"
6260 " for %xh outcount %xh", QL_NAME,
6261 ha->instance, tq->d_id.b24, tq->outcnt);
6263 } else {
6264 rval = FC_SUCCESS;
6265 break;
6267 } while (cnt > 0);
6268 DEVICE_QUEUE_UNLOCK(tq);
6271 * return, if busy or if the plogi was asynchronous.
6273 if ((rval != FC_SUCCESS) ||
6274 (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
6275 pkt->pkt_comp)) {
6276 QL_PRINT_3(CE_CONT, "(%d): done, busy or async\n",
6277 ha->instance);
6278 return (rval);
6282 * Let us give daemon sufficient time and hopefully
6283 * when transport retries PLOGI, it would have flushed
6284 * callback queue.
6286 TASK_DAEMON_LOCK(ha);
6287 for (link = ha->callback_queue.first; link != NULL;
6288 link = next_link) {
6289 next_link = link->next;
6290 sp = link->base_address;
6291 if (sp->flags & SRB_UB_CALLBACK) {
6292 ubp = ha->ub_array[sp->handle];
6293 d_id.b24 = ubp->ub_frame.s_id;
6294 } else {
6295 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
6297 if (tq->d_id.b24 == d_id.b24) {
6298 cmn_err(CE_NOTE, "!%s(%d) Plogi busy for %xh", QL_NAME,
6299 ha->instance, tq->d_id.b24);
6300 rval = FC_TRAN_BUSY;
6301 break;
6304 TASK_DAEMON_UNLOCK(ha);
6306 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6308 return (rval);
6312 * ql_login_port
6313 * Logs in a device if not already logged in.
6315 * Input:
6316 * ha = adapter state pointer.
6317 * d_id = 24 bit port ID.
6318 * DEVICE_QUEUE_LOCK must be released.
6320 * Returns:
6321 * QL local function return status code.
6323 * Context:
6324 * Kernel context.
6326 static int
6327 ql_login_port(ql_adapter_state_t *ha, port_id_t d_id)
6329 ql_adapter_state_t *vha;
6330 ql_link_t *link;
6331 uint16_t index;
6332 ql_tgt_t *tq, *tq2;
6333 uint16_t loop_id, first_loop_id, last_loop_id;
6334 int rval = QL_SUCCESS;
6336 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6337 d_id.b24);
6339 /* Get head queue index. */
6340 index = ql_alpa_to_index[d_id.b.al_pa];
6342 /* Check for device already has a queue. */
6343 tq = NULL;
6344 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6345 tq = link->base_address;
6346 if (tq->d_id.b24 == d_id.b24) {
6347 loop_id = tq->loop_id;
6348 break;
6349 } else {
6350 tq = NULL;
6354 /* Let's stop issuing any IO and unsolicited logo */
6355 if ((tq != NULL) && (!(ddi_in_panic()))) {
6356 DEVICE_QUEUE_LOCK(tq);
6357 tq->flags |= (TQF_QUEUE_SUSPENDED | TQF_PLOGI_PROGRS);
6358 tq->flags &= ~TQF_RSCN_RCVD;
6359 DEVICE_QUEUE_UNLOCK(tq);
6361 if ((tq != NULL) && (tq->loop_id & PORT_LOST_ID) &&
6362 !(tq->flags & TQF_FABRIC_DEVICE)) {
6363 loop_id = (uint16_t)(tq->loop_id & ~PORT_LOST_ID);
6366 /* Special case for Nameserver */
6367 if (d_id.b24 == 0xFFFFFC) {
6368 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
6369 SNS_24XX_HDL : SIMPLE_NAME_SERVER_LOOP_ID);
6370 if (tq == NULL) {
6371 ADAPTER_STATE_LOCK(ha);
6372 tq = ql_dev_init(ha, d_id, loop_id);
6373 ADAPTER_STATE_UNLOCK(ha);
6374 if (tq == NULL) {
6375 EL(ha, "failed=%xh, d_id=%xh\n",
6376 QL_FUNCTION_FAILED, d_id.b24);
6377 return (QL_FUNCTION_FAILED);
6380 if (!(CFG_IST(ha, CFG_CTRL_8021))) {
6381 rval = ql_login_fabric_port(ha, tq, loop_id);
6382 if (rval == QL_SUCCESS) {
6383 tq->loop_id = loop_id;
6384 tq->flags |= TQF_FABRIC_DEVICE;
6385 (void) ql_get_port_database(ha, tq, PDF_NONE);
6387 } else {
6388 ha->topology = (uint8_t)
6389 (ha->topology | QL_SNS_CONNECTION);
6391 /* Check for device already logged in. */
6392 } else if (tq != NULL && VALID_DEVICE_ID(ha, loop_id)) {
6393 if (tq->flags & TQF_FABRIC_DEVICE) {
6394 rval = ql_login_fabric_port(ha, tq, loop_id);
6395 if (rval == QL_PORT_ID_USED) {
6396 rval = QL_SUCCESS;
6398 } else if (LOCAL_LOOP_ID(loop_id)) {
6399 rval = ql_login_lport(ha, tq, loop_id, (uint16_t)
6400 (tq->flags & TQF_INITIATOR_DEVICE ?
6401 LLF_NONE : LLF_PLOGI));
6402 if (rval == QL_SUCCESS) {
6403 DEVICE_QUEUE_LOCK(tq);
6404 tq->loop_id = loop_id;
6405 DEVICE_QUEUE_UNLOCK(tq);
6408 } else if (ha->topology & QL_SNS_CONNECTION) {
6409 /* Locate unused loop ID. */
6410 if (CFG_IST(ha, CFG_CTRL_24258081)) {
6411 first_loop_id = 0;
6412 last_loop_id = LAST_N_PORT_HDL;
6413 } else if (ha->topology & QL_F_PORT) {
6414 first_loop_id = 0;
6415 last_loop_id = SNS_LAST_LOOP_ID;
6416 } else {
6417 first_loop_id = SNS_FIRST_LOOP_ID;
6418 last_loop_id = SNS_LAST_LOOP_ID;
6421 /* Acquire adapter state lock. */
6422 ADAPTER_STATE_LOCK(ha);
6424 tq = ql_dev_init(ha, d_id, PORT_NO_LOOP_ID);
6425 if (tq == NULL) {
6426 EL(ha, "failed=%xh, d_id=%xh\n", QL_FUNCTION_FAILED,
6427 d_id.b24);
6429 ADAPTER_STATE_UNLOCK(ha);
6431 return (QL_FUNCTION_FAILED);
6434 rval = QL_FUNCTION_FAILED;
6435 loop_id = ha->pha->free_loop_id++;
6436 for (index = (uint16_t)(last_loop_id - first_loop_id); index;
6437 index--) {
6438 if (loop_id < first_loop_id ||
6439 loop_id > last_loop_id) {
6440 loop_id = first_loop_id;
6441 ha->pha->free_loop_id = (uint16_t)
6442 (loop_id + 1);
6445 /* Bypass if loop ID used. */
6446 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
6447 tq2 = ql_loop_id_to_queue(vha, loop_id);
6448 if (tq2 != NULL && tq2 != tq) {
6449 break;
6452 if (vha != NULL || RESERVED_LOOP_ID(ha, loop_id) ||
6453 loop_id == ha->loop_id) {
6454 loop_id = ha->pha->free_loop_id++;
6455 continue;
6458 ADAPTER_STATE_UNLOCK(ha);
6459 rval = ql_login_fabric_port(ha, tq, loop_id);
6462 * If PORT_ID_USED is returned
6463 * the login_fabric_port() updates
6464 * with the correct loop ID
6466 switch (rval) {
6467 case QL_PORT_ID_USED:
6469 * use f/w handle and try to
6470 * login again.
6472 ADAPTER_STATE_LOCK(ha);
6473 ha->pha->free_loop_id--;
6474 ADAPTER_STATE_UNLOCK(ha);
6475 loop_id = tq->loop_id;
6476 break;
6478 case QL_SUCCESS:
6479 tq->flags |= TQF_FABRIC_DEVICE;
6480 (void) ql_get_port_database(ha,
6481 tq, PDF_NONE);
6482 index = 1;
6483 break;
6485 case QL_LOOP_ID_USED:
6486 tq->loop_id = PORT_NO_LOOP_ID;
6487 loop_id = ha->pha->free_loop_id++;
6488 break;
6490 case QL_ALL_IDS_IN_USE:
6491 tq->loop_id = PORT_NO_LOOP_ID;
6492 index = 1;
6493 break;
6495 default:
6496 tq->loop_id = PORT_NO_LOOP_ID;
6497 index = 1;
6498 break;
6501 ADAPTER_STATE_LOCK(ha);
6504 ADAPTER_STATE_UNLOCK(ha);
6505 } else {
6506 rval = QL_FUNCTION_FAILED;
6509 if (rval != QL_SUCCESS) {
6510 EL(ha, "failed=%xh, d_id=%xh\n", rval, d_id.b24);
6511 } else {
6512 EL(ha, "d_id=%xh, loop_id=%xh, "
6513 "wwpn=%02x%02x%02x%02x%02x%02x%02x%02xh\n", tq->d_id.b24,
6514 tq->loop_id, tq->port_name[0], tq->port_name[1],
6515 tq->port_name[2], tq->port_name[3], tq->port_name[4],
6516 tq->port_name[5], tq->port_name[6], tq->port_name[7]);
6518 return (rval);
6522 * ql_login_fabric_port
6523 * Issue login fabric port mailbox command.
6525 * Input:
6526 * ha: adapter state pointer.
6527 * tq: target queue pointer.
6528 * loop_id: FC Loop ID.
6530 * Returns:
6531 * ql local function return status code.
6533 * Context:
6534 * Kernel context.
6536 static int
6537 ql_login_fabric_port(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t loop_id)
6539 int rval;
6540 int index;
6541 int retry = 0;
6542 port_id_t d_id;
6543 ql_tgt_t *newq;
6544 ql_mbx_data_t mr;
6546 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
6547 tq->d_id.b24);
6550 * QL_PARAMETER_ERROR also means the firmware is
6551 * not able to allocate PCB entry due to resource
6552 * issues, or collision.
6554 do {
6555 rval = ql_login_fport(ha, tq, loop_id, LFF_NONE, &mr);
6556 if ((rval == QL_PARAMETER_ERROR) ||
6557 ((rval == QL_COMMAND_ERROR) && (mr.mb[1] == 2 ||
6558 mr.mb[1] == 3 || mr.mb[1] == 7 || mr.mb[1] == 0xd))) {
6559 retry++;
6560 drv_usecwait(10 * MILLISEC);
6561 } else {
6562 break;
6564 } while (retry < 5);
6566 switch (rval) {
6567 case QL_SUCCESS:
6568 tq->loop_id = loop_id;
6569 break;
6571 case QL_PORT_ID_USED:
6573 * This Loop ID should NOT be in use in drivers
6575 newq = ql_loop_id_to_queue(ha, mr.mb[1]);
6577 if (newq != NULL && newq != tq && tq->logout_sent == 0) {
6578 cmn_err(CE_WARN, "ql_login_fabric_port(%d): logout of "
6579 "dup loop_id=%xh, d_id=%xh", ha->instance,
6580 newq->loop_id, newq->d_id.b24);
6581 ql_send_logo(ha, newq, NULL);
6584 tq->loop_id = mr.mb[1];
6585 break;
6587 case QL_LOOP_ID_USED:
6588 d_id.b.al_pa = LSB(mr.mb[2]);
6589 d_id.b.area = MSB(mr.mb[2]);
6590 d_id.b.domain = LSB(mr.mb[1]);
6592 newq = ql_d_id_to_queue(ha, d_id);
6593 if (newq && (newq->loop_id != loop_id)) {
6595 * This should NEVER ever happen; but this
6596 * code is needed to bail out when the worst
6597 * case happens - or as used to happen before
6599 QL_PRINT_2(CE_CONT, "(%d,%d): Loop ID is now "
6600 "reassigned; old pairs: [%xh, %xh] and [%xh, %xh];"
6601 "new pairs: [%xh, unknown] and [%xh, %xh]\n",
6602 ha->instance, ha->vp_index, tq->d_id.b24, loop_id,
6603 newq->d_id.b24, newq->loop_id, tq->d_id.b24,
6604 newq->d_id.b24, loop_id);
6606 if ((newq->d_id.b24 & 0xff) != (d_id.b24 & 0xff)) {
6607 ADAPTER_STATE_LOCK(ha);
6609 index = ql_alpa_to_index[newq->d_id.b.al_pa];
6610 ql_add_link_b(&ha->dev[index], &newq->device);
6612 newq->d_id.b24 = d_id.b24;
6614 index = ql_alpa_to_index[d_id.b.al_pa];
6615 ql_add_link_b(&ha->dev[index], &newq->device);
6617 ADAPTER_STATE_UNLOCK(ha);
6620 (void) ql_get_port_database(ha, newq, PDF_NONE);
6625 * Invalidate the loop ID for the
6626 * us to obtain a new one.
6628 tq->loop_id = PORT_NO_LOOP_ID;
6629 break;
6631 case QL_ALL_IDS_IN_USE:
6632 rval = QL_FUNCTION_FAILED;
6633 EL(ha, "no loop id's available\n");
6634 break;
6636 default:
6637 if (rval == QL_COMMAND_ERROR) {
6638 switch (mr.mb[1]) {
6639 case 2:
6640 case 3:
6641 rval = QL_MEMORY_ALLOC_FAILED;
6642 break;
6644 case 4:
6645 rval = QL_FUNCTION_TIMEOUT;
6646 break;
6647 case 7:
6648 rval = QL_FABRIC_NOT_INITIALIZED;
6649 break;
6650 default:
6651 EL(ha, "cmd rtn; mb1=%xh\n", mr.mb[1]);
6652 break;
6654 } else {
6655 cmn_err(CE_WARN, "%s(%d): login fabric port failed"
6656 " D_ID=%xh, rval=%xh, mb1=%xh", QL_NAME,
6657 ha->instance, tq->d_id.b24, rval, mr.mb[1]);
6659 break;
6662 if (rval != QL_SUCCESS && rval != QL_PORT_ID_USED &&
6663 rval != QL_LOOP_ID_USED) {
6664 EL(ha, "failed=%xh\n", rval);
6665 } else {
6666 /*EMPTY*/
6667 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6669 return (rval);
6673 * ql_logout_port
6674 * Logs out a device if possible.
6676 * Input:
6677 * ha: adapter state pointer.
6678 * d_id: 24 bit port ID.
6680 * Returns:
6681 * QL local function return status code.
6683 * Context:
6684 * Kernel context.
6686 static int
6687 ql_logout_port(ql_adapter_state_t *ha, port_id_t d_id)
6689 ql_link_t *link;
6690 ql_tgt_t *tq;
6691 uint16_t index;
6693 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6695 /* Get head queue index. */
6696 index = ql_alpa_to_index[d_id.b.al_pa];
6698 /* Get device queue. */
6699 tq = NULL;
6700 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6701 tq = link->base_address;
6702 if (tq->d_id.b24 == d_id.b24) {
6703 break;
6704 } else {
6705 tq = NULL;
6709 if (tq != NULL && tq->flags & TQF_FABRIC_DEVICE) {
6710 (void) ql_logout_fabric_port(ha, tq);
6711 tq->loop_id = PORT_NO_LOOP_ID;
6714 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6716 return (QL_SUCCESS);
6720 * ql_dev_init
6721 * Initialize/allocate device queue.
6723 * Input:
6724 * ha: adapter state pointer.
6725 * d_id: device destination ID
6726 * loop_id: device loop ID
6727 * ADAPTER_STATE_LOCK must be already obtained.
6729 * Returns:
6730 * NULL = failure
6732 * Context:
6733 * Kernel context.
6735 ql_tgt_t *
6736 ql_dev_init(ql_adapter_state_t *ha, port_id_t d_id, uint16_t loop_id)
6738 ql_link_t *link;
6739 uint16_t index;
6740 ql_tgt_t *tq;
6742 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh, loop_id=%xh\n",
6743 ha->instance, d_id.b24, loop_id);
6745 index = ql_alpa_to_index[d_id.b.al_pa];
6747 /* If device queue exists, set proper loop ID. */
6748 tq = NULL;
6749 for (link = ha->dev[index].first; link != NULL; link = link->next) {
6750 tq = link->base_address;
6751 if (tq->d_id.b24 == d_id.b24) {
6752 tq->loop_id = loop_id;
6754 /* Reset port down retry count. */
6755 tq->port_down_retry_count = ha->port_down_retry_count;
6756 tq->qfull_retry_count = ha->qfull_retry_count;
6758 break;
6759 } else {
6760 tq = NULL;
6764 /* If device does not have queue. */
6765 if (tq == NULL) {
6766 tq = (ql_tgt_t *)kmem_zalloc(sizeof (ql_tgt_t), KM_SLEEP);
6767 if (tq != NULL) {
6769 * mutex to protect the device queue,
6770 * does not block interrupts.
6772 mutex_init(&tq->mutex, NULL, MUTEX_DRIVER,
6773 (ha->iflags & IFLG_INTR_AIF) ?
6774 (void *)(uintptr_t)ha->intr_pri :
6775 (void *)(uintptr_t)ha->iblock_cookie);
6777 tq->d_id.b24 = d_id.b24;
6778 tq->loop_id = loop_id;
6779 tq->device.base_address = tq;
6780 tq->iidma_rate = IIDMA_RATE_INIT;
6782 /* Reset port down retry count. */
6783 tq->port_down_retry_count = ha->port_down_retry_count;
6784 tq->qfull_retry_count = ha->qfull_retry_count;
6786 /* Add device to device queue. */
6787 ql_add_link_b(&ha->dev[index], &tq->device);
6791 if (tq == NULL) {
6792 EL(ha, "failed, d_id=%xh, loop_id=%xh\n", d_id.b24, loop_id);
6793 } else {
6794 /*EMPTY*/
6795 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6797 return (tq);
6801 * ql_dev_free
6802 * Remove queue from device list and frees resources used by queue.
6804 * Input:
6805 * ha: adapter state pointer.
6806 * tq: target queue pointer.
6807 * ADAPTER_STATE_LOCK must be already obtained.
6809 * Context:
6810 * Kernel context.
6812 void
6813 ql_dev_free(ql_adapter_state_t *ha, ql_tgt_t *tq)
6815 ql_link_t *link;
6816 uint16_t index;
6817 ql_lun_t *lq;
6819 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6821 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6822 lq = link->base_address;
6823 if (lq->cmd.first != NULL) {
6824 return;
6828 if (tq->outcnt == 0) {
6829 /* Get head queue index. */
6830 index = ql_alpa_to_index[tq->d_id.b.al_pa];
6831 for (link = ha->dev[index].first; link != NULL;
6832 link = link->next) {
6833 if (link->base_address == tq) {
6834 ql_remove_link(&ha->dev[index], link);
6836 link = tq->lun_queues.first;
6837 while (link != NULL) {
6838 lq = link->base_address;
6839 link = link->next;
6841 ql_remove_link(&tq->lun_queues,
6842 &lq->link);
6843 kmem_free(lq, sizeof (ql_lun_t));
6846 mutex_destroy(&tq->mutex);
6847 kmem_free(tq, sizeof (ql_tgt_t));
6848 break;
6853 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6857 * ql_lun_queue
6858 * Allocate LUN queue if does not exists.
6860 * Input:
6861 * ha: adapter state pointer.
6862 * tq: target queue.
6863 * lun: LUN number.
6865 * Returns:
6866 * NULL = failure
6868 * Context:
6869 * Kernel context.
6871 static ql_lun_t *
6872 ql_lun_queue(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t lun)
6874 ql_lun_t *lq;
6875 ql_link_t *link;
6877 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6879 /* Fast path. */
6880 if (tq->last_lun_queue != NULL && tq->last_lun_queue->lun_no == lun) {
6881 QL_PRINT_3(CE_CONT, "(%d): fast done\n", ha->instance);
6882 return (tq->last_lun_queue);
6885 if (lun >= MAX_LUNS) {
6886 EL(ha, "Exceeded MAX_LUN=%d, lun=%d\n", MAX_LUNS, lun);
6887 return (NULL);
6889 /* If device queue exists, set proper loop ID. */
6890 lq = NULL;
6891 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
6892 lq = link->base_address;
6893 if (lq->lun_no == lun) {
6894 QL_PRINT_3(CE_CONT, "(%d): found done\n", ha->instance);
6895 tq->last_lun_queue = lq;
6896 return (lq);
6900 /* If queue does exist. */
6901 lq = (ql_lun_t *)kmem_zalloc(sizeof (ql_lun_t), KM_SLEEP);
6903 /* Initialize LUN queue. */
6904 if (lq != NULL) {
6905 lq->link.base_address = lq;
6907 lq->lun_no = lun;
6908 lq->target_queue = tq;
6910 DEVICE_QUEUE_LOCK(tq);
6911 ql_add_link_b(&tq->lun_queues, &lq->link);
6912 DEVICE_QUEUE_UNLOCK(tq);
6913 tq->last_lun_queue = lq;
6916 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
6918 return (lq);
6922 * ql_fcp_scsi_cmd
6923 * Process fibre channel (FCP) SCSI protocol commands.
6925 * Input:
6926 * ha = adapter state pointer.
6927 * pkt = pointer to fc_packet.
6928 * sp = srb pointer.
6930 * Returns:
6931 * FC_SUCCESS - the packet was accepted for transport.
6932 * FC_TRANSPORT_ERROR - a transport error occurred.
6934 * Context:
6935 * Kernel context.
6937 static int
6938 ql_fcp_scsi_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
6940 port_id_t d_id;
6941 ql_tgt_t *tq;
6942 uint64_t *ptr;
6943 uint16_t lun;
6945 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
6947 tq = (ql_tgt_t *)pkt->pkt_fca_device;
6948 if (tq == NULL) {
6949 d_id.r.rsvd_1 = 0;
6950 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
6951 tq = ql_d_id_to_queue(ha, d_id);
6954 sp->fcp = (struct fcp_cmd *)pkt->pkt_cmd;
6955 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
6956 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
6958 if (tq != NULL &&
6959 (sp->lun_queue = ql_lun_queue(ha, tq, lun)) != NULL) {
6962 * zero out FCP response; 24 Bytes
6964 ptr = (uint64_t *)pkt->pkt_resp;
6965 *ptr++ = 0; *ptr++ = 0; *ptr++ = 0;
6967 /* Handle task management function. */
6968 if ((sp->fcp->fcp_cntl.cntl_kill_tsk |
6969 sp->fcp->fcp_cntl.cntl_clr_aca |
6970 sp->fcp->fcp_cntl.cntl_reset_tgt |
6971 sp->fcp->fcp_cntl.cntl_reset_lun |
6972 sp->fcp->fcp_cntl.cntl_clr_tsk |
6973 sp->fcp->fcp_cntl.cntl_abort_tsk) != 0) {
6974 ql_task_mgmt(ha, tq, pkt, sp);
6975 } else {
6976 ha->pha->xioctl->IosRequested++;
6977 ha->pha->xioctl->BytesRequested += (uint32_t)
6978 sp->fcp->fcp_data_len;
6981 * Setup for commands with data transfer
6983 sp->iocb = ha->fcp_cmd;
6984 sp->req_cnt = 1;
6985 if (sp->fcp->fcp_data_len != 0) {
6987 * FCP data is bound to pkt_data_dma
6989 if (sp->fcp->fcp_cntl.cntl_write_data) {
6990 (void) ddi_dma_sync(pkt->pkt_data_dma,
6991 0, 0, DDI_DMA_SYNC_FORDEV);
6994 /* Setup IOCB count. */
6995 if (pkt->pkt_data_cookie_cnt > ha->cmd_segs &&
6996 (!CFG_IST(ha, CFG_CTRL_8021) ||
6997 sp->sg_dma.dma_handle == NULL)) {
6998 uint32_t cnt;
7000 cnt = pkt->pkt_data_cookie_cnt -
7001 ha->cmd_segs;
7002 sp->req_cnt = (uint16_t)
7003 (cnt / ha->cmd_cont_segs);
7004 if (cnt % ha->cmd_cont_segs) {
7005 sp->req_cnt = (uint16_t)
7006 (sp->req_cnt + 2);
7007 } else {
7008 sp->req_cnt++;
7012 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7014 return (ql_start_cmd(ha, tq, pkt, sp));
7016 } else {
7017 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7018 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7020 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7021 ql_awaken_task_daemon(ha, sp, 0, 0);
7024 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7026 return (FC_SUCCESS);
7030 * ql_task_mgmt
7031 * Task management function processor.
7033 * Input:
7034 * ha: adapter state pointer.
7035 * tq: target queue pointer.
7036 * pkt: pointer to fc_packet.
7037 * sp: SRB pointer.
7039 * Context:
7040 * Kernel context.
7042 static void
7043 ql_task_mgmt(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7044 ql_srb_t *sp)
7046 fcp_rsp_t *fcpr;
7047 struct fcp_rsp_info *rsp;
7048 uint16_t lun;
7050 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7052 fcpr = (fcp_rsp_t *)pkt->pkt_resp;
7053 rsp = (struct fcp_rsp_info *)pkt->pkt_resp + sizeof (fcp_rsp_t);
7055 bzero(fcpr, pkt->pkt_rsplen);
7057 fcpr->fcp_u.fcp_status.rsp_len_set = 1;
7058 fcpr->fcp_response_len = 8;
7059 lun = CHAR_TO_SHORT(lobyte(sp->fcp->fcp_ent_addr.ent_addr_0),
7060 hibyte(sp->fcp->fcp_ent_addr.ent_addr_0));
7062 if (sp->fcp->fcp_cntl.cntl_clr_aca) {
7063 if (ql_clear_aca(ha, tq, lun) != QL_SUCCESS) {
7064 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7066 } else if (sp->fcp->fcp_cntl.cntl_reset_lun) {
7067 if (ql_lun_reset(ha, tq, lun) != QL_SUCCESS) {
7068 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7070 } else if (sp->fcp->fcp_cntl.cntl_reset_tgt) {
7071 if (ql_target_reset(ha, tq, ha->loop_reset_delay) !=
7072 QL_SUCCESS) {
7073 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7075 } else if (sp->fcp->fcp_cntl.cntl_clr_tsk) {
7076 if (ql_clear_task_set(ha, tq, lun) != QL_SUCCESS) {
7077 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7079 } else if (sp->fcp->fcp_cntl.cntl_abort_tsk) {
7080 if (ql_abort_task_set(ha, tq, lun) != QL_SUCCESS) {
7081 rsp->rsp_code = FCP_TASK_MGMT_FAILED;
7083 } else {
7084 rsp->rsp_code = FCP_TASK_MGMT_NOT_SUPPTD;
7087 pkt->pkt_state = FC_PKT_SUCCESS;
7089 /* Do command callback. */
7090 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7091 ql_awaken_task_daemon(ha, sp, 0, 0);
7094 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7098 * ql_fcp_ip_cmd
7099 * Process fibre channel (FCP) Internet (IP) protocols commands.
7101 * Input:
7102 * ha: adapter state pointer.
7103 * pkt: pointer to fc_packet.
7104 * sp: SRB pointer.
7106 * Returns:
7107 * FC_SUCCESS - the packet was accepted for transport.
7108 * FC_TRANSPORT_ERROR - a transport error occurred.
7110 * Context:
7111 * Kernel context.
7113 static int
7114 ql_fcp_ip_cmd(ql_adapter_state_t *ha, fc_packet_t *pkt, ql_srb_t *sp)
7116 port_id_t d_id;
7117 ql_tgt_t *tq;
7119 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7121 tq = (ql_tgt_t *)pkt->pkt_fca_device;
7122 if (tq == NULL) {
7123 d_id.r.rsvd_1 = 0;
7124 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7125 tq = ql_d_id_to_queue(ha, d_id);
7128 if (tq != NULL && (sp->lun_queue = ql_lun_queue(ha, tq, 0)) != NULL) {
7130 * IP data is bound to pkt_cmd_dma
7132 (void) ddi_dma_sync(pkt->pkt_cmd_dma,
7133 0, 0, DDI_DMA_SYNC_FORDEV);
7135 /* Setup IOCB count. */
7136 sp->iocb = ha->ip_cmd;
7137 if (pkt->pkt_cmd_cookie_cnt > ha->cmd_segs) {
7138 uint32_t cnt;
7140 cnt = pkt->pkt_cmd_cookie_cnt - ha->cmd_segs;
7141 sp->req_cnt = (uint16_t)(cnt / ha->cmd_cont_segs);
7142 if (cnt % ha->cmd_cont_segs) {
7143 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7144 } else {
7145 sp->req_cnt++;
7147 } else {
7148 sp->req_cnt = 1;
7150 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7152 return (ql_start_cmd(ha, tq, pkt, sp));
7153 } else {
7154 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7155 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7157 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp)
7158 ql_awaken_task_daemon(ha, sp, 0, 0);
7161 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7163 return (FC_SUCCESS);
7167 * ql_fc_services
7168 * Process fibre channel services (name server).
7170 * Input:
7171 * ha: adapter state pointer.
7172 * pkt: pointer to fc_packet.
7174 * Returns:
7175 * FC_SUCCESS - the packet was accepted for transport.
7176 * FC_TRANSPORT_ERROR - a transport error occurred.
7178 * Context:
7179 * Kernel context.
7181 static int
7182 ql_fc_services(ql_adapter_state_t *ha, fc_packet_t *pkt)
7184 uint32_t cnt;
7185 fc_ct_header_t hdr;
7186 la_els_rjt_t rjt;
7187 port_id_t d_id;
7188 ql_tgt_t *tq;
7189 ql_srb_t *sp;
7190 int rval;
7192 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Snapshot the CT header out of the command DMA buffer. */
7194 ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&hdr,
7195 (uint8_t *)pkt->pkt_cmd, sizeof (hdr), DDI_DEV_AUTOINCR);
7197 bzero(&rjt, sizeof (rjt));
7199 /* Do some sanity checks */
/* ct_aiusize appears to be in 4-byte units (hence * 4) -- TODO confirm */
7200 cnt = (uint32_t)((uint32_t)(hdr.ct_aiusize * 4) +
7201 sizeof (fc_ct_header_t));
7202 if (cnt > (uint32_t)pkt->pkt_rsplen) {
7203 EL(ha, "FC_ELS_MALFORMED, cnt=%xh, size=%xh\n", cnt,
7204 pkt->pkt_rsplen);
7205 return (FC_ELS_MALFORMED);
/* Only directory (name server) and management service frames are sent. */
7208 switch (hdr.ct_fcstype) {
7209 case FCSTYPE_DIRECTORY:
7210 case FCSTYPE_MGMTSERVICE:
7211 /* An FCA must make sure that the header is in big endian */
7212 ql_cthdr_endian(pkt->pkt_cmd_acc, pkt->pkt_cmd, B_FALSE);
7214 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7215 tq = ql_d_id_to_queue(ha, d_id);
7216 sp = (ql_srb_t *)pkt->pkt_fca_private;
/* No connection: mark LOCAL_RJT but rval stays QL_SUCCESS so no LS_RJT body is built below. */
7217 if (tq == NULL ||
7218 (sp->lun_queue = ql_lun_queue(ha, tq, 0)) == NULL) {
7219 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7220 pkt->pkt_reason = FC_REASON_NO_CONNECTION;
7221 rval = QL_SUCCESS;
7222 break;
7226 * Services data is bound to pkt_cmd_dma
7228 (void) ddi_dma_sync(pkt->pkt_cmd_dma, 0, 0,
7229 DDI_DMA_SYNC_FORDEV);
/* Mark the SRB as a management-services passthrough with retries. */
7231 sp->flags |= SRB_MS_PKT;
7232 sp->retry_count = 32;
7234 /* Setup IOCB count. */
7235 sp->iocb = ha->ms_cmd;
/* Response cookies beyond the MS IOCB capacity need continuation IOCBs. */
7236 if (pkt->pkt_resp_cookie_cnt > MS_DATA_SEGMENTS) {
7237 cnt = pkt->pkt_resp_cookie_cnt - MS_DATA_SEGMENTS;
7238 sp->req_cnt =
7239 (uint16_t)(cnt / CONT_TYPE_1_DATA_SEGMENTS);
7240 if (cnt % CONT_TYPE_1_DATA_SEGMENTS) {
7241 sp->req_cnt = (uint16_t)(sp->req_cnt + 2);
7242 } else {
7243 sp->req_cnt++;
7245 } else {
7246 sp->req_cnt = 1;
/* Started commands return directly; no completion callback here. */
7248 rval = ql_start_cmd(ha, tq, pkt, sp);
7250 QL_PRINT_3(CE_CONT, "(%d): done, ql_start_cmd=%xh\n",
7251 ha->instance, rval);
7253 return (rval);
7255 default:
7256 EL(ha, "unknown fcstype=%xh\n", hdr.ct_fcstype);
7257 rval = QL_FUNCTION_PARAMETER_ERROR;
7258 break;
/* Unsupported service type: place an LS_RJT body in the response buffer. */
7261 if (rval != QL_SUCCESS) {
7262 /* Build RJT. */
7263 rjt.ls_code.ls_code = LA_ELS_RJT;
7264 rjt.reason = FC_REASON_CMD_UNSUPPORTED;
7266 ddi_rep_put8(pkt->pkt_resp_acc, (uint8_t *)&rjt,
7267 (uint8_t *)pkt->pkt_resp, sizeof (rjt), DDI_DEV_AUTOINCR);
7269 pkt->pkt_state = FC_PKT_LOCAL_RJT;
7270 pkt->pkt_reason = FC_REASON_UNSUPPORTED;
7271 EL(ha, "LA_ELS_RJT, FC_REASON_UNSUPPORTED\n");
7274 /* Do command callback. */
7275 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) && pkt->pkt_comp) {
7276 ql_awaken_task_daemon(ha, (ql_srb_t *)pkt->pkt_fca_private,
7277 0, 0);
7280 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7282 return (FC_SUCCESS);
7286 * ql_cthdr_endian
7287 * Change endianess of ct passthrough header and payload.
7289 * Input:
7290 * acc_handle: DMA buffer access handle.
7291 * ct_hdr: Pointer to header.
7292 * restore: Restore first flag.
7294 * Context:
7295 * Interrupt or Kernel context, no mailbox commands allowed.
7297 void
7298 ql_cthdr_endian(ddi_acc_handle_t acc_handle, caddr_t ct_hdr,
7299 boolean_t restore)
7301 uint8_t i, *bp;
7302 fc_ct_header_t hdr;
7303 uint32_t *hdrp = (uint32_t *)&hdr;
7305 ddi_rep_get8(acc_handle, (uint8_t *)&hdr,
7306 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7308 if (restore) {
7309 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7310 *hdrp = BE_32(*hdrp);
7311 hdrp++;
7315 if (hdr.ct_fcstype == FCSTYPE_DIRECTORY) {
7316 bp = (uint8_t *)ct_hdr + sizeof (fc_ct_header_t);
7318 switch (hdr.ct_cmdrsp) {
7319 case NS_GA_NXT:
7320 case NS_GPN_ID:
7321 case NS_GNN_ID:
7322 case NS_GCS_ID:
7323 case NS_GFT_ID:
7324 case NS_GSPN_ID:
7325 case NS_GPT_ID:
7326 case NS_GID_FT:
7327 case NS_GID_PT:
7328 case NS_RPN_ID:
7329 case NS_RNN_ID:
7330 case NS_RSPN_ID:
7331 case NS_DA_ID:
7332 BIG_ENDIAN_32(bp);
7333 break;
7334 case NS_RFT_ID:
7335 case NS_RCS_ID:
7336 case NS_RPT_ID:
7337 BIG_ENDIAN_32(bp);
7338 bp += 4;
7339 BIG_ENDIAN_32(bp);
7340 break;
7341 case NS_GNN_IP:
7342 case NS_GIPA_IP:
7343 BIG_ENDIAN(bp, 16);
7344 break;
7345 case NS_RIP_NN:
7346 bp += 8;
7347 BIG_ENDIAN(bp, 16);
7348 break;
7349 case NS_RIPA_NN:
7350 bp += 8;
7351 BIG_ENDIAN_64(bp);
7352 break;
7353 default:
7354 break;
7358 if (restore == B_FALSE) {
7359 for (i = 0; i < ((sizeof (hdr)) / (sizeof (uint32_t))); i++) {
7360 *hdrp = BE_32(*hdrp);
7361 hdrp++;
7365 ddi_rep_put8(acc_handle, (uint8_t *)&hdr,
7366 (uint8_t *)ct_hdr, sizeof (hdr), DDI_DEV_AUTOINCR);
7370 * ql_start_cmd
7371 * Finishes starting fibre channel protocol (FCP) command.
7373 * Input:
7374 * ha: adapter state pointer.
7375 * tq: target queue pointer.
7376 * pkt: pointer to fc_packet.
7377 * sp: SRB pointer.
7379 * Context:
7380 * Kernel context.
7382 static int
7383 ql_start_cmd(ql_adapter_state_t *ha, ql_tgt_t *tq, fc_packet_t *pkt,
7384 ql_srb_t *sp)
7386 int rval = FC_SUCCESS;
7387 time_t poll_wait = 0;
7388 ql_lun_t *lq = sp->lun_queue;
7390 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* handle == 0 means no outstanding-command slot assigned yet (see ql_done). */
7392 sp->handle = 0;
7394 /* Set poll for finish. */
7395 if (pkt->pkt_tran_flags & FC_TRAN_NO_INTR) {
7396 sp->flags |= SRB_POLL;
7397 if (pkt->pkt_timeout == 0) {
7398 pkt->pkt_timeout = SCSI_POLL_TIMEOUT;
7402 /* Acquire device queue lock. */
7403 DEVICE_QUEUE_LOCK(tq);
7406 * If we need authentication, report device busy to
7407 * upper layers to retry later
7409 if (tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION)) {
7410 DEVICE_QUEUE_UNLOCK(tq);
7411 EL(ha, "failed, FC_DEVICE_BUSY=%xh, d_id=%xh\n", tq->flags,
7412 tq->d_id.b24);
7413 return (FC_DEVICE_BUSY);
7416 /* Insert command onto watchdog queue. */
7417 if (!(pkt->pkt_tran_flags & FC_TRAN_DUMPING)) {
7418 ql_timeout_insert(ha, tq, sp);
7419 } else {
7421 * Run dump requests in polled mode as kernel threads
7422 * and interrupts may have been disabled.
7424 sp->flags |= SRB_POLL;
7425 sp->init_wdg_q_time = 0;
7426 sp->isp_timeout = 0;
7429 /* If a polling command setup wait time. */
7430 if (sp->flags & SRB_POLL) {
7431 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7432 poll_wait = (sp->wdg_q_time + 2) * WATCHDOG_TIME;
7433 } else {
7434 poll_wait = pkt->pkt_timeout;
/* Loop-down timeout abort in force: complete immediately as CS_PORT_UNAVAILABLE. */
7438 if (ha->pha->flags & ABORT_CMDS_LOOP_DOWN_TMO &&
7439 (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING))) {
7440 /* Set ending status. */
7441 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
7443 /* Call done routine to handle completions. */
7444 sp->cmd.next = NULL;
7445 DEVICE_QUEUE_UNLOCK(tq);
7446 ql_done(&sp->cmd);
7447 } else {
/* Panic path: first polled command resets the ISP once (lip_on_panic latch). */
7448 if (ddi_in_panic() && (sp->flags & SRB_POLL)) {
7449 int do_lip = 0;
7451 DEVICE_QUEUE_UNLOCK(tq);
7453 ADAPTER_STATE_LOCK(ha);
7454 if ((do_lip = ha->pha->lip_on_panic) == 0) {
7455 ha->pha->lip_on_panic++;
7457 ADAPTER_STATE_UNLOCK(ha);
7459 if (!do_lip) {
7462 * That Qlogic F/W performs PLOGI, PRLI, etc
7463 * is helpful here. If a PLOGI fails for some
7464 * reason, you would get CS_PORT_LOGGED_OUT
7465 * or some such error; and we should get a
7466 * careful polled mode login kicked off inside
7467 * of this driver itself. You don't have FC
7468 * transport's services as all threads are
7469 * suspended, interrupts disabled, and so
7470 * on. Right now we do re-login if the packet
7471 * state isn't FC_PKT_SUCCESS.
7473 (void) ql_abort_isp(ha);
7476 ql_start_iocb(ha, sp);
7477 } else {
7478 /* Add the command to the device queue */
7479 if (pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
7480 ql_add_link_t(&lq->cmd, &sp->cmd);
7481 } else {
7482 ql_add_link_b(&lq->cmd, &sp->cmd);
7485 sp->flags |= SRB_IN_DEVICE_QUEUE;
7487 /* Check whether next message can be processed */
7488 ql_next(ha, lq);
7492 /* If polling, wait for finish. */
7493 if (poll_wait) {
/* Timed out: try to abort; if the abort itself fails, dequeue the SRB by hand. */
7494 if (ql_poll_cmd(ha, sp, poll_wait) != QL_SUCCESS) {
7495 int res;
7497 res = ql_abort((opaque_t)ha, pkt, 0);
7498 if (res != FC_SUCCESS && res != FC_ABORTED) {
7499 DEVICE_QUEUE_LOCK(tq);
7500 ql_remove_link(&lq->cmd, &sp->cmd);
7501 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7502 DEVICE_QUEUE_UNLOCK(tq);
7506 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7507 EL(ha, "failed, FC_TRANSPORT_ERROR\n");
7508 rval = FC_TRANSPORT_ERROR;
7511 if (ddi_in_panic()) {
7512 if (pkt->pkt_state != FC_PKT_SUCCESS) {
7513 port_id_t d_id;
7516 * successful LOGIN implies by design
7517 * that PRLI also succeeded for disks
7518 * Note also that there is no special
7519 * mailbox command to send PRLI.
7521 d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
7522 (void) ql_login_port(ha, d_id);
7527 * This should only happen during CPR dumping
7529 if (!(pkt->pkt_tran_flags & FC_TRAN_NO_INTR) &&
7530 pkt->pkt_comp) {
7531 sp->flags &= ~SRB_POLL;
7532 (*pkt->pkt_comp)(pkt);
7536 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7538 return (rval);
7542 * ql_poll_cmd
7543 * Polls commands for completion.
7545 * Input:
7546 * ha = adapter state pointer.
7547 * sp = SRB command pointer.
7548 * poll_wait = poll wait time in seconds.
7550 * Returns:
7551 * QL local function return status code.
7553 * Context:
7554 * Kernel context.
7556 static int
7557 ql_poll_cmd(ql_adapter_state_t *vha, ql_srb_t *sp, time_t poll_wait)
7559 int rval = QL_SUCCESS;
/*
 * NOTE(review): poll_wait is in seconds, each loop pass burns 10ms and
 * subtracts 10, so poll_wait * 100 makes the total wait poll_wait * 1000ms
 * only if the units line up -- verify ql_delay() units against this math.
 */
7560 time_t msecs_left = poll_wait * 100; /* 10ms inc */
7561 ql_adapter_state_t *ha = vha->pha;
7563 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Spin until the ISR/done path clears SRB_POLL or the wait expires. */
7565 while (sp->flags & SRB_POLL) {
/* With interrupts off (or stuck, or in panic) service the hardware by hand. */
7567 if ((ha->flags & INTERRUPTS_ENABLED) == 0 ||
7568 ha->idle_timer >= 15 || ddi_in_panic()) {
7570 /* If waiting for restart, do it now. */
7571 if (ha->port_retry_timer != 0) {
7572 ADAPTER_STATE_LOCK(ha);
7573 ha->port_retry_timer = 0;
7574 ADAPTER_STATE_UNLOCK(ha);
7576 TASK_DAEMON_LOCK(ha);
7577 ha->task_daemon_flags |= PORT_RETRY_NEEDED;
7578 TASK_DAEMON_UNLOCK(ha);
/* Poll the interrupt status and run the ISR directly. */
7581 if (INTERRUPT_PENDING(ha)) {
7582 (void) ql_isr((caddr_t)ha);
7583 INTR_LOCK(ha);
7584 ha->intr_claimed = TRUE;
7585 INTR_UNLOCK(ha);
7589 * Call task thread function in case the
7590 * daemon is not running.
7592 TASK_DAEMON_LOCK(ha);
7594 if (!ddi_in_panic() && QL_DAEMON_NOT_ACTIVE(ha) &&
7595 QL_TASK_PENDING(ha)) {
7596 ha->task_daemon_flags |= TASK_THREAD_CALLED;
7597 ql_task_thread(ha);
7598 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
7601 TASK_DAEMON_UNLOCK(ha);
7604 if (msecs_left < 10) {
7605 rval = QL_FUNCTION_TIMEOUT;
7606 break;
7610 * Polling interval is 10 milli seconds; Increasing
7611 * the polling interval to seconds since disk IO
7612 * timeout values are ~60 seconds is tempting enough,
7613 * but CPR dump time increases, and so will the crash
7614 * dump time; Don't toy with the settings without due
7615 * consideration for all the scenarios that will be
7616 * impacted.
7618 ql_delay(ha, 10000);
7619 msecs_left -= 10;
7622 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7624 return (rval);
7628 * ql_next
7629 * Retrieve and process next job in the device queue.
7631 * Input:
7632 * ha: adapter state pointer.
7633 * lq: LUN queue pointer.
7634 * DEVICE_QUEUE_LOCK must be already obtained.
7636 * Output:
7637 * Releases DEVICE_QUEUE_LOCK upon exit.
7639 * Context:
7640 * Interrupt or Kernel context, no mailbox commands allowed.
7642 void
7643 ql_next(ql_adapter_state_t *vha, ql_lun_t *lq)
7645 ql_srb_t *sp;
7646 ql_link_t *link;
7647 ql_tgt_t *tq = lq->target_queue;
7648 ql_adapter_state_t *ha = vha->pha;
7650 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Entered with DEVICE_QUEUE_LOCK held; every exit path releases it. */
7652 if (ddi_in_panic()) {
7653 DEVICE_QUEUE_UNLOCK(tq);
7654 QL_PRINT_3(CE_CONT, "(%d): panic/active exit\n",
7655 ha->instance);
7656 return;
/* Drain the LUN queue head-first until a throttle/state check stops us. */
7659 while ((link = lq->cmd.first) != NULL) {
7660 sp = link->base_address;
7662 /* Exit if can not start commands. */
7663 if (DRIVER_SUSPENDED(ha) ||
7664 (ha->flags & ONLINE) == 0 ||
7665 !VALID_DEVICE_ID(ha, tq->loop_id) ||
7666 sp->flags & SRB_ABORT ||
7667 tq->flags & (TQF_RSCN_RCVD | TQF_NEED_AUTHENTICATION |
7668 TQF_QUEUE_SUSPENDED)) {
7669 EL(vha, "break, d_id=%xh, tdf=%xh, tqf=%xh, spf=%xh, "
7670 "haf=%xh, loop_id=%xh\n", tq->d_id.b24,
7671 ha->task_daemon_flags, tq->flags, sp->flags,
7672 ha->flags, tq->loop_id);
7673 break;
7677 * Find out the LUN number for untagged command use.
7678 * If there is an untagged command pending for the LUN,
7679 * we would not submit another untagged command
7680 * or if reached LUN execution throttle.
7682 if (sp->flags & SRB_FCP_CMD_PKT) {
7683 if (lq->flags & LQF_UNTAGGED_PENDING ||
7684 lq->lun_outcnt >= ha->execution_throttle) {
7685 QL_PRINT_8(CE_CONT, "(%d): break, d_id=%xh, "
7686 "lf=%xh, lun_outcnt=%xh\n", ha->instance,
7687 tq->d_id.b24, lq->flags, lq->lun_outcnt);
7688 break;
7690 if (sp->fcp->fcp_cntl.cntl_qtype ==
7691 FCP_QTYPE_UNTAGGED) {
7693 * Set the untagged-flag for the LUN
7694 * so that no more untagged commands
7695 * can be submitted for this LUN.
7697 lq->flags |= LQF_UNTAGGED_PENDING;
7700 /* Count command as sent. */
7701 lq->lun_outcnt++;
7704 /* Remove srb from device queue. */
7705 ql_remove_link(&lq->cmd, &sp->cmd);
7706 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
7708 tq->outcnt++;
/* Hand the SRB to the request ring. */
7710 ql_start_iocb(vha, sp);
7713 /* Release device queue lock. */
7714 DEVICE_QUEUE_UNLOCK(tq);
7716 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
7720 * ql_done
7721 * Process completed commands.
7723 * Input:
7724 * link: first command link in chain.
7726 * Context:
7727 * Interrupt or Kernel context, no mailbox commands allowed.
7729 void
7730 ql_done(ql_link_t *link)
7732 ql_adapter_state_t *ha;
7733 ql_link_t *next_link;
7734 ql_srb_t *sp;
7735 ql_tgt_t *tq;
7736 ql_lun_t *lq;
7738 QL_PRINT_3(CE_CONT, "started\n");
/* Walk the chain of completed SRBs; each one is either an unsolicited-buffer callback or a normal command completion. */
7740 for (; link != NULL; link = next_link) {
7741 next_link = link->next;
7742 sp = link->base_address;
7743 ha = sp->ha;
7745 if (sp->flags & SRB_UB_CALLBACK) {
/* Unsolicited buffer: return it to firmware accounting and wake the daemon. */
7746 QL_UB_LOCK(ha);
7747 if (sp->flags & SRB_UB_IN_ISP) {
7748 if (ha->ub_outcnt != 0) {
7749 ha->ub_outcnt--;
7751 QL_UB_UNLOCK(ha);
7752 ql_isp_rcvbuf(ha);
7753 QL_UB_LOCK(ha);
7755 QL_UB_UNLOCK(ha);
7756 ql_awaken_task_daemon(ha, sp, 0, 0);
7757 } else {
7758 /* Free outstanding command slot. */
7759 if (sp->handle != 0) {
7760 ha->outstanding_cmds[
7761 sp->handle & OSC_INDEX_MASK] = NULL;
7762 sp->handle = 0;
7763 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
7766 /* Acquire device queue lock. */
7767 lq = sp->lun_queue;
7768 tq = lq->target_queue;
7769 DEVICE_QUEUE_LOCK(tq);
7771 /* Decrement outstanding commands on device. */
7772 if (tq->outcnt != 0) {
7773 tq->outcnt--;
7776 if (sp->flags & SRB_FCP_CMD_PKT) {
7777 if (sp->fcp->fcp_cntl.cntl_qtype ==
7778 FCP_QTYPE_UNTAGGED) {
7780 * Clear the flag for this LUN so that
7781 * untagged commands can be submitted
7782 * for it.
7784 lq->flags &= ~LQF_UNTAGGED_PENDING;
7787 if (lq->lun_outcnt != 0) {
7788 lq->lun_outcnt--;
7792 /* Reset port down retry count on good completion. */
7793 if (sp->pkt->pkt_reason == CS_COMPLETE) {
7794 tq->port_down_retry_count =
7795 ha->port_down_retry_count;
7796 tq->qfull_retry_count = ha->qfull_retry_count;
7800 /* Alter aborted status for fast timeout feature */
7801 if (CFG_IST(ha, CFG_FAST_TIMEOUT) &&
7802 (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7803 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7804 sp->flags & SRB_RETRY &&
7805 (sp->flags & SRB_WATCHDOG_ENABLED &&
7806 sp->wdg_q_time > 1)) {
7807 EL(ha, "fast abort modify change\n");
7808 sp->flags &= ~(SRB_RETRY);
7809 sp->pkt->pkt_reason = CS_TIMEOUT;
7812 /* Place request back on top of target command queue */
/* Retry path: requeue at the queue head with the watchdog timer reset. */
7813 if ((sp->flags & (SRB_MS_PKT | SRB_ELS_PKT) ||
7814 !(tq->flags & TQF_NEED_AUTHENTICATION)) &&
7815 sp->flags & SRB_RETRY &&
7816 (sp->flags & SRB_WATCHDOG_ENABLED &&
7817 sp->wdg_q_time > 1)) {
7818 sp->flags &= ~(SRB_ISP_STARTED |
7819 SRB_ISP_COMPLETED | SRB_RETRY);
7821 /* Reset watchdog timer */
7822 sp->wdg_q_time = sp->init_wdg_q_time;
7824 /* Issue marker command on reset status. */
7825 if (!(ha->task_daemon_flags & LOOP_DOWN) &&
7826 (sp->pkt->pkt_reason == CS_RESET ||
7827 (CFG_IST(ha, CFG_CTRL_24258081) &&
7828 sp->pkt->pkt_reason == CS_ABORTED))) {
7829 (void) ql_marker(ha, tq->loop_id, 0,
7830 MK_SYNC_ID);
7833 ql_add_link_t(&lq->cmd, &sp->cmd);
7834 sp->flags |= SRB_IN_DEVICE_QUEUE;
7835 ql_next(ha, lq);
7836 } else {
7837 /* Remove command from watchdog queue. */
7838 if (sp->flags & SRB_WATCHDOG_ENABLED) {
7839 ql_remove_link(&tq->wdg, &sp->wdg);
7840 sp->flags &= ~SRB_WATCHDOG_ENABLED;
/* ql_next() releases the device queue lock in the first arm; otherwise unlock here. */
7843 if (lq->cmd.first != NULL) {
7844 ql_next(ha, lq);
7845 } else {
7846 /* Release LU queue specific lock. */
7847 DEVICE_QUEUE_UNLOCK(tq);
7848 if (ha->pha->pending_cmds.first !=
7849 NULL) {
7850 ql_start_iocb(ha, NULL);
7854 /* Sync buffers if required. */
7855 if (sp->flags & (SRB_MS_PKT | SRB_ELS_PKT)) {
7856 (void) ddi_dma_sync(
7857 sp->pkt->pkt_resp_dma,
7858 0, 0, DDI_DMA_SYNC_FORCPU);
7861 /* Map ISP completion codes. */
7862 sp->pkt->pkt_expln = FC_EXPLN_NONE;
7863 sp->pkt->pkt_action = FC_ACTION_RETRYABLE;
7864 switch (sp->pkt->pkt_reason) {
7865 case CS_COMPLETE:
7866 sp->pkt->pkt_state = FC_PKT_SUCCESS;
7867 break;
7868 case CS_RESET:
7869 /* Issue marker command. */
7870 if (!(ha->task_daemon_flags &
7871 LOOP_DOWN)) {
7872 (void) ql_marker(ha,
7873 tq->loop_id, 0,
7874 MK_SYNC_ID);
7876 sp->pkt->pkt_state =
7877 FC_PKT_PORT_OFFLINE;
7878 sp->pkt->pkt_reason =
7879 FC_REASON_ABORTED;
7880 break;
7881 case CS_RESOUCE_UNAVAILABLE:
7882 sp->pkt->pkt_state = FC_PKT_LOCAL_BSY;
7883 sp->pkt->pkt_reason =
7884 FC_REASON_PKT_BUSY;
7885 break;
7887 case CS_TIMEOUT:
7888 sp->pkt->pkt_state = FC_PKT_TIMEOUT;
7889 sp->pkt->pkt_reason =
7890 FC_REASON_HW_ERROR;
7891 break;
7892 case CS_DATA_OVERRUN:
7893 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7894 sp->pkt->pkt_reason =
7895 FC_REASON_OVERRUN;
7896 break;
7897 case CS_PORT_UNAVAILABLE:
7898 case CS_PORT_LOGGED_OUT:
7899 sp->pkt->pkt_state =
7900 FC_PKT_PORT_OFFLINE;
7901 sp->pkt->pkt_reason =
7902 FC_REASON_LOGIN_REQUIRED;
7903 ql_send_logo(ha, tq, NULL);
7904 break;
7905 case CS_PORT_CONFIG_CHG:
7906 sp->pkt->pkt_state =
7907 FC_PKT_PORT_OFFLINE;
7908 sp->pkt->pkt_reason =
7909 FC_REASON_OFFLINE;
7910 break;
7911 case CS_QUEUE_FULL:
7912 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7913 sp->pkt->pkt_reason = FC_REASON_QFULL;
7914 break;
7916 case CS_ABORTED:
7917 DEVICE_QUEUE_LOCK(tq);
7918 if (tq->flags & (TQF_RSCN_RCVD |
7919 TQF_NEED_AUTHENTICATION)) {
7920 sp->pkt->pkt_state =
7921 FC_PKT_PORT_OFFLINE;
7922 sp->pkt->pkt_reason =
7923 FC_REASON_LOGIN_REQUIRED;
7924 } else {
7925 sp->pkt->pkt_state =
7926 FC_PKT_LOCAL_RJT;
7927 sp->pkt->pkt_reason =
7928 FC_REASON_ABORTED;
7930 DEVICE_QUEUE_UNLOCK(tq);
7931 break;
7933 case CS_TRANSPORT:
7934 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
/* NOTE(review): FC_PKT_TRAN_ERROR is a pkt_state constant being stored in pkt_reason -- looks suspect, verify against FC reason codes. */
7935 sp->pkt->pkt_reason =
7936 FC_PKT_TRAN_ERROR;
7937 break;
7939 case CS_DATA_UNDERRUN:
7940 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7941 sp->pkt->pkt_reason =
7942 FC_REASON_UNDERRUN;
7943 break;
7944 case CS_DMA_ERROR:
7945 case CS_BAD_PAYLOAD:
7946 case CS_UNKNOWN:
7947 case CS_CMD_FAILED:
7948 default:
7949 sp->pkt->pkt_state = FC_PKT_LOCAL_RJT;
7950 sp->pkt->pkt_reason =
7951 FC_REASON_HW_ERROR;
7952 break;
7955 /* Now call the pkt completion callback */
/* Polled commands complete inline; otherwise immediate callback or deferred via task daemon. */
7956 if (sp->flags & SRB_POLL) {
7957 sp->flags &= ~SRB_POLL;
7958 } else if (sp->pkt->pkt_comp) {
7959 if (sp->pkt->pkt_tran_flags &
7960 FC_TRAN_IMMEDIATE_CB) {
7961 (*sp->pkt->pkt_comp)(sp->pkt);
7962 } else {
7963 ql_awaken_task_daemon(ha, sp,
7964 0, 0);
7971 QL_PRINT_3(CE_CONT, "done\n");
7975 * ql_awaken_task_daemon
7976 * Adds command completion callback to callback queue and/or
7977 * awakens task daemon thread.
7979 * Input:
7980 * ha: adapter state pointer.
7981 * sp: srb pointer.
7982 * set_flags: task daemon flags to set.
7983 * reset_flags: task daemon flags to reset.
7985 * Context:
7986 * Interrupt or Kernel context, no mailbox commands allowed.
7988 void
7989 ql_awaken_task_daemon(ql_adapter_state_t *vha, ql_srb_t *sp,
7990 uint32_t set_flags, uint32_t reset_flags)
7992 ql_adapter_state_t *ha = vha->pha;
7994 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
7996 /* Acquire task daemon lock. */
7997 TASK_DAEMON_LOCK(ha);
/* Don't re-request an ISP abort while one is already in progress. */
7999 if (set_flags & ISP_ABORT_NEEDED) {
8000 if (ha->task_daemon_flags & ABORT_ISP_ACTIVE) {
8001 set_flags &= ~ISP_ABORT_NEEDED;
8005 ha->task_daemon_flags |= set_flags;
8006 ha->task_daemon_flags &= ~reset_flags;
/* Daemon suspended: do the work inline instead of queueing it. */
8008 if (QL_DAEMON_SUSPENDED(ha)) {
8009 if (sp != NULL) {
8010 TASK_DAEMON_UNLOCK(ha);
8012 /* Do callback. */
8013 if (sp->flags & SRB_UB_CALLBACK) {
8014 ql_unsol_callback(sp);
8015 } else {
8016 (*sp->pkt->pkt_comp)(sp->pkt);
8018 } else {
/* No SRB: run the task thread directly unless in interrupt context or already nested. */
8019 if (!(curthread->t_flag & T_INTR_THREAD) &&
8020 !(ha->task_daemon_flags & TASK_THREAD_CALLED)) {
8021 ha->task_daemon_flags |= TASK_THREAD_CALLED;
8022 ql_task_thread(ha);
8023 ha->task_daemon_flags &= ~TASK_THREAD_CALLED;
8026 TASK_DAEMON_UNLOCK(ha);
8028 } else {
/* Normal case: queue the completion and wake the sleeping daemon. */
8029 if (sp != NULL) {
8030 ql_add_link_b(&ha->callback_queue, &sp->cmd);
8033 if (ha->task_daemon_flags & TASK_DAEMON_SLEEPING_FLG) {
8034 cv_broadcast(&ha->cv_task_daemon);
8036 TASK_DAEMON_UNLOCK(ha);
8039 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8043 * ql_task_daemon
8044 * Thread that is awaken by the driver when a
8045 * background needs to be done.
8047 * Input:
8048 * arg = adapter state pointer.
8050 * Context:
8051 * Kernel context.
8053 static void
8054 ql_task_daemon(void *arg)
8056 ql_adapter_state_t *ha = (void *)arg;
8058 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Register with the CPR (suspend/resume) framework before sleeping on the cv. */
8060 CALLB_CPR_INIT(&ha->cprinfo, &ha->task_daemon_mutex, callb_generic_cpr,
8061 "ql_task_daemon");
8063 /* Acquire task daemon lock. */
8064 TASK_DAEMON_LOCK(ha);
8066 ha->task_daemon_flags |= TASK_DAEMON_ALIVE_FLG;
/* Work loop: run pending tasks, then sleep until awakened or stopped. */
8068 while ((ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) == 0) {
8069 ql_task_thread(ha);
8071 QL_PRINT_3(CE_CONT, "(%d): Going to sleep\n", ha->instance);
8074 * Before we wait on the conditional variable, we
8075 * need to check if STOP_FLG is set for us to terminate
8077 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
8078 break;
8081 /*LINTED [Solaris CALLB_CPR_SAFE_BEGIN Lint error]*/
8082 CALLB_CPR_SAFE_BEGIN(&ha->cprinfo);
8084 ha->task_daemon_flags |= TASK_DAEMON_SLEEPING_FLG;
8086 /* If killed, stop task daemon */
/* cv_wait_sig() returns 0 when interrupted by a signal. */
8087 if (cv_wait_sig(&ha->cv_task_daemon,
8088 &ha->task_daemon_mutex) == 0) {
8089 ha->task_daemon_flags |= TASK_DAEMON_STOP_FLG;
8092 ha->task_daemon_flags &= ~TASK_DAEMON_SLEEPING_FLG;
8094 /*LINTED [Solaris CALLB_CPR_SAFE_END Lint error]*/
8095 CALLB_CPR_SAFE_END(&ha->cprinfo, &ha->task_daemon_mutex);
8097 QL_PRINT_3(CE_CONT, "(%d): Awakened\n", ha->instance);
8100 ha->task_daemon_flags &= ~(TASK_DAEMON_STOP_FLG |
8101 TASK_DAEMON_ALIVE_FLG);
8103 /*LINTED [Solaris CALLB_CPR_EXIT Lint error]*/
8104 CALLB_CPR_EXIT(&ha->cprinfo);
8106 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8108 thread_exit();
8112 * ql_task_thread
8113 * Thread run by daemon.
8115 * Input:
8116 * ha = adapter state pointer.
8117 * TASK_DAEMON_LOCK must be acquired prior to call.
8119 * Context:
8120 * Kernel context.
8122 static void
8123 ql_task_thread(ql_adapter_state_t *ha)
8125 int loop_again;
8126 ql_srb_t *sp;
8127 ql_head_t *head;
8128 ql_link_t *link;
8129 caddr_t msg;
8130 ql_adapter_state_t *vha;
/*
 * Entered and exited with TASK_DAEMON_LOCK held.  Each handler below
 * drops the lock around its work, reacquires it, and sets loop_again
 * so the whole dispatch list is rescanned until nothing is pending.
 */
8132 do {
8133 QL_PRINT_3(CE_CONT, "(%d): task_daemon_flags=%xh\n",
8134 ha->instance, ha->task_daemon_flags);
8136 loop_again = FALSE;
/* Stall (do no work) unless the adapter is at full power. */
8138 QL_PM_LOCK(ha);
8139 if (ha->power_level != PM_LEVEL_D0) {
8140 QL_PM_UNLOCK(ha);
8141 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8142 break;
8144 QL_PM_UNLOCK(ha);
8146 /* IDC event. */
8147 if (ha->task_daemon_flags & IDC_EVENT) {
8148 ha->task_daemon_flags &= ~IDC_EVENT;
8149 TASK_DAEMON_UNLOCK(ha);
8150 ql_process_idc_event(ha);
8151 TASK_DAEMON_LOCK(ha);
8152 loop_again = TRUE;
/* Stall when suspended, stopping, stalled by the driver, or offline. */
8155 if (ha->flags & ADAPTER_SUSPENDED || ha->task_daemon_flags &
8156 (TASK_DAEMON_STOP_FLG | DRIVER_STALL) ||
8157 (ha->flags & ONLINE) == 0) {
8158 ha->task_daemon_flags |= TASK_DAEMON_STALLED_FLG;
8159 break;
8161 ha->task_daemon_flags &= ~TASK_DAEMON_STALLED_FLG;
/* ISP abort pending: log any parity error and report the port offline. */
8163 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8164 TASK_DAEMON_UNLOCK(ha);
8165 if (ha->log_parity_pause == B_TRUE) {
8166 (void) ql_flash_errlog(ha,
8167 FLASH_ERRLOG_PARITY_ERR, 0,
8168 MSW(ha->parity_stat_err),
8169 LSW(ha->parity_stat_err));
8170 ha->log_parity_pause = B_FALSE;
8172 ql_port_state(ha, FC_STATE_OFFLINE, FC_STATE_CHANGE);
8173 TASK_DAEMON_LOCK(ha);
8174 loop_again = TRUE;
8177 /* Idle Check. */
8178 if (ha->task_daemon_flags & TASK_DAEMON_IDLE_CHK_FLG) {
8179 ha->task_daemon_flags &= ~TASK_DAEMON_IDLE_CHK_FLG;
8180 if (!(ha->task_daemon_flags & QL_SUSPENDED)) {
8181 TASK_DAEMON_UNLOCK(ha);
8182 ql_idle_check(ha);
8183 TASK_DAEMON_LOCK(ha);
8184 loop_again = TRUE;
8188 /* Crystal+ port#0 bypass transition */
8189 if (ha->task_daemon_flags & HANDLE_PORT_BYPASS_CHANGE) {
8190 ha->task_daemon_flags &= ~HANDLE_PORT_BYPASS_CHANGE;
8191 TASK_DAEMON_UNLOCK(ha);
8192 (void) ql_initiate_lip(ha);
8193 TASK_DAEMON_LOCK(ha);
8194 loop_again = TRUE;
8197 /* Abort queues needed. */
8198 if (ha->task_daemon_flags & ABORT_QUEUES_NEEDED) {
8199 ha->task_daemon_flags &= ~ABORT_QUEUES_NEEDED;
8200 TASK_DAEMON_UNLOCK(ha);
8201 ql_abort_queues(ha);
8202 TASK_DAEMON_LOCK(ha);
8205 /* Not suspended, awaken waiting routines. */
8206 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8207 ha->task_daemon_flags & SUSPENDED_WAKEUP_FLG) {
8208 ha->task_daemon_flags &= ~SUSPENDED_WAKEUP_FLG;
8209 cv_broadcast(&ha->cv_dr_suspended);
8210 loop_again = TRUE;
8213 /* Handle RSCN changes. */
8214 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8215 if (vha->task_daemon_flags & RSCN_UPDATE_NEEDED) {
8216 vha->task_daemon_flags &= ~RSCN_UPDATE_NEEDED;
8217 TASK_DAEMON_UNLOCK(ha);
8218 (void) ql_handle_rscn_update(vha);
8219 TASK_DAEMON_LOCK(ha);
8220 loop_again = TRUE;
8224 /* Handle state changes. */
/* Per virtual port: report link state transitions and run the statec callback. */
8225 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8226 if (vha->task_daemon_flags & FC_STATE_CHANGE &&
8227 !(ha->task_daemon_flags &
8228 TASK_DAEMON_POWERING_DOWN)) {
8229 /* Report state change. */
8230 EL(vha, "state change = %xh\n", vha->state);
8231 vha->task_daemon_flags &= ~FC_STATE_CHANGE;
8233 if (vha->task_daemon_flags &
8234 COMMAND_WAIT_NEEDED) {
8235 vha->task_daemon_flags &=
8236 ~COMMAND_WAIT_NEEDED;
8237 if (!(ha->task_daemon_flags &
8238 COMMAND_WAIT_ACTIVE)) {
8239 ha->task_daemon_flags |=
8240 COMMAND_WAIT_ACTIVE;
8241 TASK_DAEMON_UNLOCK(ha);
8242 ql_cmd_wait(ha);
8243 TASK_DAEMON_LOCK(ha);
8244 ha->task_daemon_flags &=
8245 ~COMMAND_WAIT_ACTIVE;
/* Choose the console message for the online/offline transition, if any. */
8249 msg = NULL;
8250 if (FC_PORT_STATE_MASK(vha->state) ==
8251 FC_STATE_OFFLINE) {
8252 if (vha->task_daemon_flags &
8253 STATE_ONLINE) {
8254 if (ha->topology &
8255 QL_LOOP_CONNECTION) {
8256 msg = "Loop OFFLINE";
8257 } else {
8258 msg = "Link OFFLINE";
8261 vha->task_daemon_flags &=
8262 ~STATE_ONLINE;
8263 } else if (FC_PORT_STATE_MASK(vha->state) ==
8264 FC_STATE_LOOP) {
8265 if (!(vha->task_daemon_flags &
8266 STATE_ONLINE)) {
8267 msg = "Loop ONLINE";
8269 vha->task_daemon_flags |= STATE_ONLINE;
8270 } else if (FC_PORT_STATE_MASK(vha->state) ==
8271 FC_STATE_ONLINE) {
8272 if (!(vha->task_daemon_flags &
8273 STATE_ONLINE)) {
8274 msg = "Link ONLINE";
8276 vha->task_daemon_flags |= STATE_ONLINE;
8277 } else {
8278 msg = "Unknown Link state";
8281 if (msg != NULL) {
8282 cmn_err(CE_NOTE, "!Qlogic %s(%d,%d): "
8283 "%s", QL_NAME, ha->instance,
8284 vha->vp_index, msg);
8287 if (vha->flags & FCA_BOUND) {
8288 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8289 "cb state=%xh\n", ha->instance,
8290 vha->vp_index, vha->state);
8291 TASK_DAEMON_UNLOCK(ha);
8292 (vha->bind_info.port_statec_cb)
8293 (vha->bind_info.port_handle,
8294 vha->state);
8295 TASK_DAEMON_LOCK(ha);
8297 loop_again = TRUE;
/* LIP reset: notify every bound virtual port of the target port reset. */
8301 if (ha->task_daemon_flags & LIP_RESET_PENDING &&
8302 !(ha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN)) {
8303 EL(ha, "processing LIP reset\n");
8304 ha->task_daemon_flags &= ~LIP_RESET_PENDING;
8305 TASK_DAEMON_UNLOCK(ha);
8306 for (vha = ha; vha != NULL; vha = vha->vp_next) {
8307 if (vha->flags & FCA_BOUND) {
8308 QL_PRINT_10(CE_CONT, "(%d,%d): statec_"
8309 "cb reset\n", ha->instance,
8310 vha->vp_index);
8311 (vha->bind_info.port_statec_cb)
8312 (vha->bind_info.port_handle,
8313 FC_STATE_TARGET_PORT_RESET);
8316 TASK_DAEMON_LOCK(ha);
8317 loop_again = TRUE;
8320 if (QL_IS_SET(ha->task_daemon_flags, NEED_UNSOLICITED_BUFFERS |
8321 FIRMWARE_UP)) {
8323 * The firmware needs more unsolicited
8324 * buffers. We cannot allocate any new
8325 * buffers unless the ULP module requests
8326 * for new buffers. All we can do here is
8327 * to give received buffers from the pool
8328 * that is already allocated
8330 ha->task_daemon_flags &= ~NEED_UNSOLICITED_BUFFERS;
8331 TASK_DAEMON_UNLOCK(ha);
8332 ql_isp_rcvbuf(ha);
8333 TASK_DAEMON_LOCK(ha);
8334 loop_again = TRUE;
8337 if (ha->task_daemon_flags & ISP_ABORT_NEEDED) {
8338 TASK_DAEMON_UNLOCK(ha);
8339 (void) ql_abort_isp(ha);
8340 TASK_DAEMON_LOCK(ha);
8341 loop_again = TRUE;
/* Reset markers and loop resync only run when the loop is usable. */
8344 if (!(ha->task_daemon_flags & (LOOP_DOWN | DRIVER_STALL |
8345 COMMAND_WAIT_NEEDED))) {
8346 if (QL_IS_SET(ha->task_daemon_flags,
8347 RESET_MARKER_NEEDED | FIRMWARE_UP)) {
8348 ha->task_daemon_flags &= ~RESET_MARKER_NEEDED;
8349 if (!(ha->task_daemon_flags & RESET_ACTIVE)) {
8350 ha->task_daemon_flags |= RESET_ACTIVE;
8351 TASK_DAEMON_UNLOCK(ha);
8352 for (vha = ha; vha != NULL;
8353 vha = vha->vp_next) {
8354 ql_rst_aen(vha);
8356 TASK_DAEMON_LOCK(ha);
8357 ha->task_daemon_flags &= ~RESET_ACTIVE;
8358 loop_again = TRUE;
8362 if (QL_IS_SET(ha->task_daemon_flags,
8363 LOOP_RESYNC_NEEDED | FIRMWARE_UP)) {
8364 if (!(ha->task_daemon_flags &
8365 LOOP_RESYNC_ACTIVE)) {
8366 ha->task_daemon_flags |=
8367 LOOP_RESYNC_ACTIVE;
8368 TASK_DAEMON_UNLOCK(ha);
8369 (void) ql_loop_resync(ha);
8370 TASK_DAEMON_LOCK(ha);
8371 loop_again = TRUE;
8376 /* Port retry needed. */
8377 if (ha->task_daemon_flags & PORT_RETRY_NEEDED) {
8378 ha->task_daemon_flags &= ~PORT_RETRY_NEEDED;
8379 ADAPTER_STATE_LOCK(ha);
8380 ha->port_retry_timer = 0;
8381 ADAPTER_STATE_UNLOCK(ha);
8383 TASK_DAEMON_UNLOCK(ha);
8384 ql_restart_queues(ha);
8385 TASK_DAEMON_LOCK(ha);
8386 loop_again = B_TRUE;
8389 /* iiDMA setting needed? */
8390 if (ha->task_daemon_flags & TD_IIDMA_NEEDED) {
8391 ha->task_daemon_flags &= ~TD_IIDMA_NEEDED;
8393 TASK_DAEMON_UNLOCK(ha);
8394 ql_iidma(ha);
8395 TASK_DAEMON_LOCK(ha);
8396 loop_again = B_TRUE;
8399 if (ha->task_daemon_flags & SEND_PLOGI) {
8400 ha->task_daemon_flags &= ~SEND_PLOGI;
8401 TASK_DAEMON_UNLOCK(ha);
8402 (void) ql_n_port_plogi(ha);
8403 TASK_DAEMON_LOCK(ha);
/* Drain one deferred completion callback per pass. */
8406 head = &ha->callback_queue;
8407 if (head->first != NULL) {
8408 sp = head->first->base_address;
8409 link = &sp->cmd;
8411 /* Dequeue command. */
8412 ql_remove_link(head, link);
8414 /* Release task daemon lock. */
8415 TASK_DAEMON_UNLOCK(ha);
8417 /* Do callback. */
8418 if (sp->flags & SRB_UB_CALLBACK) {
8419 ql_unsol_callback(sp);
8420 } else {
8421 (*sp->pkt->pkt_comp)(sp->pkt);
8424 /* Acquire task daemon lock. */
8425 TASK_DAEMON_LOCK(ha);
8427 loop_again = TRUE;
8430 } while (loop_again);
8434 * ql_idle_check
8435 * Test for adapter is alive and well.
8437 * Input:
8438 * ha: adapter state pointer.
8440 * Context:
8441 * Kernel context.
8443 static void
8444 ql_idle_check(ql_adapter_state_t *ha)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Issues a GET_FIRMWARE_STATE mailbox command; if the firmware is not in
 * the READY state (and the adapter is not suspended) it reports a degraded
 * service fault to the DDI framework and schedules an ISP abort.
 */
8446 ddi_devstate_t state;
8447 int rval;
8448 ql_mbx_data_t mr;
8450 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8452 /* Firmware Ready Test. */
8453 rval = ql_get_firmware_state(ha, &mr);
8454 if (!(ha->task_daemon_flags & QL_SUSPENDED) &&
8455 (rval != QL_SUCCESS || mr.mb[1] != FSTATE_READY)) {
/* Firmware not READY and adapter not suspended: fault + schedule abort. */
8456 EL(ha, "failed, Firmware Ready Test = %xh\n", rval);
8457 state = ddi_get_devstate(ha->dip);
8458 if (state == DDI_DEVSTATE_UP) {
8459 /*EMPTY*/
8460 ddi_dev_report_fault(ha->dip, DDI_SERVICE_DEGRADED,
8461 DDI_DEVICE_FAULT, "Firmware Ready Test failed");
8463 TASK_DAEMON_LOCK(ha);
/* Only schedule an ISP abort if one is not already in progress. */
8464 if (!(ha->task_daemon_flags & ABORT_ISP_ACTIVE)) {
8465 EL(ha, "fstate_ready, isp_abort_needed\n");
8466 ha->task_daemon_flags |= ISP_ABORT_NEEDED;
8468 TASK_DAEMON_UNLOCK(ha);
8471 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8475 * ql_unsol_callback
8476 * Handle unsolicited buffer callbacks.
8478 * Input:
8479 * ha = adapter state pointer.
8480 * sp = srb pointer.
8482 * Context:
8483 * Kernel context.
8485 static void
8486 ql_unsol_callback(ql_srb_t *sp)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Dispatches a received unsolicited buffer: drops it if it is being freed
 * or the adapter is powering down, defers/forwards RSCNs and unsolicited
 * LOGOs, then hands the buffer up via the bound port's port_unsol_cb.
 */
8488 fc_affected_id_t *af;
8489 fc_unsol_buf_t *ubp;
8490 uchar_t r_ctl;
8491 uchar_t ls_code;
8492 ql_tgt_t *tq;
8493 ql_adapter_state_t *ha = sp->ha, *pha = sp->ha->pha;
8495 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8497 ubp = ha->ub_array[sp->handle];
8498 r_ctl = ubp->ub_frame.r_ctl;
8499 ls_code = ubp->ub_buffer[0];
8501 if (sp->lun_queue == NULL) {
8502 tq = NULL;
8503 } else {
8504 tq = sp->lun_queue->target_queue;
8507 QL_UB_LOCK(ha);
/* Buffer being reclaimed or adapter powering down: return it to the FCA. */
8508 if (sp->flags & SRB_UB_FREE_REQUESTED ||
8509 pha->task_daemon_flags & TASK_DAEMON_POWERING_DOWN) {
8510 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
8511 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
8512 sp->flags |= SRB_UB_IN_FCA;
8513 QL_UB_UNLOCK(ha);
8514 return;
8517 /* Process RSCN */
8518 if (sp->flags & SRB_UB_RSCN) {
8519 int sendup = 1;
8522 * Defer RSCN posting until commands return
8524 QL_UB_UNLOCK(ha);
8526 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8528 /* Abort outstanding commands */
8529 sendup = ql_process_rscn(ha, af);
/* sendup == 0: commands still draining; requeue the SRB and retry later. */
8530 if (sendup == 0) {
8532 TASK_DAEMON_LOCK(ha);
8533 ql_add_link_b(&pha->callback_queue, &sp->cmd);
8534 TASK_DAEMON_UNLOCK(ha);
8537 * Wait for commands to drain in F/W (doesn't take
8538 * more than a few milliseconds)
8540 ql_delay(ha, 10000);
8542 QL_PRINT_2(CE_CONT, "(%d,%d): done rscn_sendup=0, "
8543 "fmt=%xh, d_id=%xh\n", ha->instance, ha->vp_index,
8544 af->aff_format, af->aff_d_id);
8545 return;
8548 QL_UB_LOCK(ha);
8550 EL(ha, "sending unsol rscn, fmt=%xh, d_id=%xh to transport\n",
8551 af->aff_format, af->aff_d_id);
8554 /* Process UNSOL LOGO */
8555 if ((r_ctl == R_CTL_ELS_REQ) && (ls_code == LA_ELS_LOGO)) {
8556 QL_UB_UNLOCK(ha);
/* Defer the LOGO as well while the device still has work outstanding. */
8558 if (tq && (ql_process_logo_for_device(ha, tq) == 0)) {
8559 TASK_DAEMON_LOCK(ha);
8560 ql_add_link_b(&pha->callback_queue, &sp->cmd);
8561 TASK_DAEMON_UNLOCK(ha);
8562 QL_PRINT_2(CE_CONT, "(%d,%d): logo_sendup=0, d_id=%xh"
8563 "\n", ha->instance, ha->vp_index, tq->d_id.b24);
8564 return;
8567 QL_UB_LOCK(ha);
8568 EL(ha, "sending unsol logout for %xh to transport\n",
8569 ubp->ub_frame.s_id);
8572 sp->flags &= ~(SRB_UB_IN_FCA | SRB_UB_IN_ISP | SRB_UB_RSCN |
8573 SRB_UB_FCP);
/* IP (8802 SNAP) buffers are DMA targets: sync for CPU before handing up. */
8575 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
8576 (void) ddi_dma_sync(sp->ub_buffer.dma_handle, 0,
8577 ubp->ub_bufsize, DDI_DMA_SYNC_FORCPU);
8579 QL_UB_UNLOCK(ha);
8581 (ha->bind_info.port_unsol_cb)(ha->bind_info.port_handle,
8582 ubp, sp->ub_type);
8584 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8588 * ql_send_logo
8590 * Input:
8591 * ha: adapter state pointer.
8592 * tq: target queue pointer.
8593 * done_q: done queue pointer.
8595 * Context:
8596 * Interrupt or Kernel context, no mailbox commands allowed.
8598 void
8599 ql_send_logo(ql_adapter_state_t *vha, ql_tgt_t *tq, ql_head_t *done_q)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Fabricates an unsolicited LOGO ELS frame (s_id = the target's d_id) in a
 * driver-owned unsolicited buffer and queues it so the transport sees the
 * device as having logged out; also marks the target for re-authentication.
 */
8601 fc_unsol_buf_t *ubp;
8602 ql_srb_t *sp;
8603 la_els_logo_t *payload;
8604 ql_adapter_state_t *ha = vha->pha;
8606 QL_PRINT_3(CE_CONT, "(%d): started, d_id=%xh\n", ha->instance,
8607 tq->d_id.b24);
/* No valid destination: unassigned (0) or broadcast (0xffffff) d_id. */
8609 if ((tq->d_id.b24 == 0) || (tq->d_id.b24 == 0xffffff)) {
8610 EL(ha, "no device, d_id=%xh\n", tq->d_id.b24);
8611 return;
8614 if ((tq->flags & (TQF_RSCN_RCVD | TQF_PLOGI_PROGRS)) == 0 &&
8615 tq->logout_sent == 0 && (ha->task_daemon_flags & LOOP_DOWN) == 0) {
8617 /* Locate a buffer to use. */
8618 ubp = ql_get_unsolicited_buffer(vha, FC_TYPE_EXTENDED_LS);
8619 if (ubp == NULL) {
8620 EL(vha, "Failed, get_unsolicited_buffer\n");
8621 return;
8624 DEVICE_QUEUE_LOCK(tq);
8625 tq->flags |= TQF_NEED_AUTHENTICATION;
8626 tq->logout_sent++;
8627 DEVICE_QUEUE_UNLOCK(tq);
/* NOTE(review): message says "Received" but this path fabricates the LOGO. */
8629 EL(vha, "Received LOGO from = %xh\n", tq->d_id.b24);
8631 sp = ubp->ub_fca_private;
8633 /* Set header. */
8634 ubp->ub_frame.d_id = vha->d_id.b24;
8635 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8636 ubp->ub_frame.s_id = tq->d_id.b24;
8637 ubp->ub_frame.rsvd = 0;
8638 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8639 F_CTL_SEQ_INITIATIVE;
8640 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8641 ubp->ub_frame.seq_cnt = 0;
8642 ubp->ub_frame.df_ctl = 0;
8643 ubp->ub_frame.seq_id = 0;
8644 ubp->ub_frame.rx_id = 0xffff;
8645 ubp->ub_frame.ox_id = 0xffff;
8647 /* set payload. */
8648 payload = (la_els_logo_t *)ubp->ub_buffer;
8649 bzero(payload, sizeof (la_els_logo_t));
8650 /* Make sure ls_code in payload is always big endian */
8651 ubp->ub_buffer[0] = LA_ELS_LOGO;
8652 ubp->ub_buffer[1] = 0;
8653 ubp->ub_buffer[2] = 0;
8654 ubp->ub_buffer[3] = 0;
8655 bcopy(&vha->loginparams.node_ww_name.raw_wwn[0],
8656 &payload->nport_ww_name.raw_wwn[0], 8);
8657 payload->nport_id.port_id = tq->d_id.b24;
8659 QL_UB_LOCK(ha);
8660 sp->flags |= SRB_UB_CALLBACK;
8661 QL_UB_UNLOCK(ha);
/* Attach a LUN queue so the callback can find the target queue later. */
8662 if (tq->lun_queues.first != NULL) {
8663 sp->lun_queue = (tq->lun_queues.first)->base_address;
8664 } else {
8665 sp->lun_queue = ql_lun_queue(vha, tq, 0);
8667 if (done_q) {
8668 ql_add_link_b(done_q, &sp->cmd);
8669 } else {
8670 ql_awaken_task_daemon(ha, sp, 0, 0);
8674 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8677 static int
8678 ql_process_logo_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Decides whether an unsolicited LOGO for this target may be sent up now.
 * Returns 1 (send up) when the device has no outstanding commands and no
 * queued callbacks for its d_id; returns 0 (defer) otherwise, aborting the
 * device's outstanding commands first.
 */
8680 port_id_t d_id;
8681 ql_srb_t *sp;
8682 ql_link_t *link;
8683 int sendup = 1;
8685 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8687 DEVICE_QUEUE_LOCK(tq);
8688 if (tq->outcnt) {
/* Commands still outstanding: abort them, give F/W time, defer the LOGO. */
8689 DEVICE_QUEUE_UNLOCK(tq);
8690 sendup = 0;
8691 (void) ql_abort_device(ha, tq, 1);
8692 ql_delay(ha, 10000);
8693 } else {
8694 DEVICE_QUEUE_UNLOCK(tq);
8695 TASK_DAEMON_LOCK(ha);
/* Defer while any non-UB completion for this d_id is still queued. */
8697 for (link = ha->pha->callback_queue.first; link != NULL;
8698 link = link->next) {
8699 sp = link->base_address;
8700 if (sp->flags & SRB_UB_CALLBACK) {
8701 continue;
8703 d_id.b24 = sp->pkt->pkt_cmd_fhdr.d_id;
8705 if (tq->d_id.b24 == d_id.b24) {
8706 sendup = 0;
8707 break;
8711 TASK_DAEMON_UNLOCK(ha);
8714 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8716 return (sendup);
8719 static int
8720 ql_send_plogi(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_head_t *done_q)
8722 fc_unsol_buf_t *ubp;
8723 ql_srb_t *sp;
8724 la_els_logi_t *payload;
8725 class_svc_param_t *class3_param;
8727 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8729 if ((tq->flags & TQF_RSCN_RCVD) || (ha->task_daemon_flags &
8730 LOOP_DOWN)) {
8731 EL(ha, "Failed, tqf=%xh\n", tq->flags);
8732 return (QL_FUNCTION_FAILED);
8735 /* Locate a buffer to use. */
8736 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8737 if (ubp == NULL) {
8738 EL(ha, "Failed\n");
8739 return (QL_FUNCTION_FAILED);
8742 QL_PRINT_3(CE_CONT, "(%d): Received LOGO from = %xh\n",
8743 ha->instance, tq->d_id.b24);
8745 EL(ha, "Emulate PLOGI from = %xh tq = %x\n", tq->d_id.b24, tq);
8747 sp = ubp->ub_fca_private;
8749 /* Set header. */
8750 ubp->ub_frame.d_id = ha->d_id.b24;
8751 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8752 ubp->ub_frame.s_id = tq->d_id.b24;
8753 ubp->ub_frame.rsvd = 0;
8754 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8755 F_CTL_SEQ_INITIATIVE;
8756 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8757 ubp->ub_frame.seq_cnt = 0;
8758 ubp->ub_frame.df_ctl = 0;
8759 ubp->ub_frame.seq_id = 0;
8760 ubp->ub_frame.rx_id = 0xffff;
8761 ubp->ub_frame.ox_id = 0xffff;
8763 /* set payload. */
8764 payload = (la_els_logi_t *)ubp->ub_buffer;
8765 bzero(payload, sizeof (payload));
8767 payload->ls_code.ls_code = LA_ELS_PLOGI;
8768 payload->common_service.fcph_version = 0x2006;
8769 payload->common_service.cmn_features = 0x8800;
8771 CFG_IST(ha, CFG_CTRL_24258081) ?
8772 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8773 ha->init_ctrl_blk.cb24.max_frame_length[0],
8774 ha->init_ctrl_blk.cb24.max_frame_length[1])) :
8775 (payload->common_service.rx_bufsize = CHAR_TO_SHORT(
8776 ha->init_ctrl_blk.cb.max_frame_length[0],
8777 ha->init_ctrl_blk.cb.max_frame_length[1]));
8779 payload->common_service.conc_sequences = 0xff;
8780 payload->common_service.relative_offset = 0x03;
8781 payload->common_service.e_d_tov = 0x7d0;
8783 bcopy((void *)&tq->port_name[0],
8784 (void *)&payload->nport_ww_name.raw_wwn[0], 8);
8786 bcopy((void *)&tq->node_name[0],
8787 (void *)&payload->node_ww_name.raw_wwn[0], 8);
8789 class3_param = (class_svc_param_t *)&payload->class_3;
8790 class3_param->class_valid_svc_opt = 0x8000;
8791 class3_param->recipient_ctl = tq->class3_recipient_ctl;
8792 class3_param->rcv_data_size = tq->class3_rcv_data_size;
8793 class3_param->conc_sequences = tq->class3_conc_sequences;
8794 class3_param->open_sequences_per_exch =
8795 tq->class3_open_sequences_per_exch;
8797 QL_UB_LOCK(ha);
8798 sp->flags |= SRB_UB_CALLBACK;
8799 QL_UB_UNLOCK(ha);
8801 ql_isp_els_handle_endian(ha, (uint8_t *)payload, LA_ELS_PLOGI);
8803 if (done_q) {
8804 ql_add_link_b(done_q, &sp->cmd);
8805 } else {
8806 ql_awaken_task_daemon(ha, sp, 0, 0);
8809 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8811 return (QL_SUCCESS);
8815 * Abort outstanding commands in the Firmware, clear internally
8816 * queued commands in the driver, Synchronize the target with
8817 * the Firmware
/*
 * NOTE(review): extraction elided blank lines, closing braces, and this
 * function's return-type line (int, original line 8819); code kept
 * byte-identical, comments only.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	tq:	target queue pointer.
 *	drain:	non-zero to also abort the target in firmware.
 * Returns QL_SUCCESS or the ql_abort_target() status.
 */
8820 ql_abort_device(ql_adapter_state_t *ha, ql_tgt_t *tq, int drain)
8822 ql_link_t *link, *link2;
8823 ql_lun_t *lq;
8824 int rval = QL_SUCCESS;
8825 ql_srb_t *sp;
8826 ql_head_t done_q = { NULL, NULL };
8828 QL_PRINT_10(CE_CONT, "(%d,%d): started\n", ha->instance, ha->vp_index);
8831 * First clear, internally queued commands
8833 DEVICE_QUEUE_LOCK(tq);
8834 for (link = tq->lun_queues.first; link != NULL; link = link->next) {
8835 lq = link->base_address;
8837 link2 = lq->cmd.first;
8838 while (link2 != NULL) {
8839 sp = link2->base_address;
8840 link2 = link2->next;
/* Skip commands already being aborted. */
8842 if (sp->flags & SRB_ABORT) {
8843 continue;
8846 /* Remove srb from device command queue. */
8847 ql_remove_link(&lq->cmd, &sp->cmd);
8848 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
8850 /* Set ending status. */
8851 sp->pkt->pkt_reason = CS_ABORTED;
8853 /* Call done routine to handle completions. */
8854 ql_add_link_b(&done_q, &sp->cmd);
8857 DEVICE_QUEUE_UNLOCK(tq);
8859 if (done_q.first != NULL) {
8860 ql_done(done_q.first);
/* Then abort what is outstanding in the firmware for a logged-in target. */
8863 if (drain && VALID_TARGET_ID(ha, tq->loop_id) && PD_PORT_LOGIN(tq)) {
8864 rval = ql_abort_target(ha, tq, 0);
8867 if (rval != QL_SUCCESS) {
8868 EL(ha, "failed=%xh, d_id=%xh\n", rval, tq->d_id.b24);
8869 } else {
8870 /*EMPTY*/
8871 QL_PRINT_10(CE_CONT, "(%d,%d): done\n", ha->instance,
8872 ha->vp_index);
8875 return (rval);
8879 * ql_rcv_rscn_els
8880 * Processes received RSCN extended link service.
8882 * Input:
8883 * ha: adapter state pointer.
8884 * mb: array containing input mailbox registers.
8885 * done_q: done queue pointer.
8887 * Context:
8888 * Interrupt or Kernel context, no mailbox commands allowed.
8890 void
8891 ql_rcv_rscn_els(ql_adapter_state_t *ha, uint16_t *mb, ql_head_t *done_q)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Rebuilds an RSCN ELS frame from the async-event mailbox registers into
 * an unsolicited buffer, marks affected targets (ql_update_rscn), and
 * queues the buffer for the unsolicited callback path.
 */
8893 fc_unsol_buf_t *ubp;
8894 ql_srb_t *sp;
8895 fc_rscn_t *rn;
8896 fc_affected_id_t *af;
8897 port_id_t d_id;
8899 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
8901 /* Locate a buffer to use. */
8902 ubp = ql_get_unsolicited_buffer(ha, FC_TYPE_EXTENDED_LS);
8903 if (ubp != NULL) {
8904 sp = ubp->ub_fca_private;
8906 /* Set header. */
8907 ubp->ub_frame.d_id = ha->d_id.b24;
8908 ubp->ub_frame.r_ctl = R_CTL_ELS_REQ;
8909 ubp->ub_frame.s_id = FS_FABRIC_CONTROLLER;
8910 ubp->ub_frame.rsvd = 0;
8911 ubp->ub_frame.f_ctl = F_CTL_FIRST_SEQ | F_CTL_END_SEQ |
8912 F_CTL_SEQ_INITIATIVE;
8913 ubp->ub_frame.type = FC_TYPE_EXTENDED_LS;
8914 ubp->ub_frame.seq_cnt = 0;
8915 ubp->ub_frame.df_ctl = 0;
8916 ubp->ub_frame.seq_id = 0;
8917 ubp->ub_frame.rx_id = 0xffff;
8918 ubp->ub_frame.ox_id = 0xffff;
8920 /* set payload. */
8921 rn = (fc_rscn_t *)ubp->ub_buffer;
8922 af = (fc_affected_id_t *)((caddr_t)ubp->ub_buffer + 4);
8924 rn->rscn_code = LA_ELS_RSCN;
8925 rn->rscn_len = 4;
8926 rn->rscn_payload_len = 8;
/* Affected address: mb[1] = format/domain, mb[2] = area/al_pa. */
8927 d_id.b.al_pa = LSB(mb[2]);
8928 d_id.b.area = MSB(mb[2]);
8929 d_id.b.domain = LSB(mb[1]);
8930 af->aff_d_id = d_id.b24;
8931 af->aff_format = MSB(mb[1]);
8933 EL(ha, "LA_ELS_RSCN fmt=%xh, d_id=%xh\n", af->aff_format,
8934 af->aff_d_id);
8936 ql_update_rscn(ha, af);
8938 QL_UB_LOCK(ha);
8939 sp->flags |= SRB_UB_CALLBACK | SRB_UB_RSCN;
8940 QL_UB_UNLOCK(ha);
8941 ql_add_link_b(done_q, &sp->cmd);
8944 if (ubp == NULL) {
8945 EL(ha, "Failed, get_unsolicited_buffer\n");
8946 } else {
8947 /*EMPTY*/
8948 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
8953 * ql_update_rscn
8954 * Update devices from received RSCN.
8956 * Input:
8957 * ha: adapter state pointer.
8958 * af: pointer to RSCN data.
8960 * Context:
8961 * Interrupt or Kernel context, no mailbox commands allowed.
8963 static void
8964 ql_update_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Sets TQF_RSCN_RCVD on every target queue matched by the RSCN's address
 * format: a single port, or all ports in an area, domain, or the fabric.
 */
8966 ql_link_t *link;
8967 uint16_t index;
8968 ql_tgt_t *tq;
8970 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Single-port RSCN: flag just the one matching target queue. */
8972 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
8973 port_id_t d_id;
8975 d_id.r.rsvd_1 = 0;
8976 d_id.b24 = af->aff_d_id;
8978 tq = ql_d_id_to_queue(ha, d_id);
8979 if (tq) {
8980 EL(ha, "SD_RSCN_RCVD %xh RPA\n", d_id.b24);
8981 DEVICE_QUEUE_LOCK(tq);
8982 tq->flags |= TQF_RSCN_RCVD;
8983 DEVICE_QUEUE_UNLOCK(tq);
8985 QL_PRINT_3(CE_CONT, "(%d): FC_RSCN_PORT_ADDRESS done\n",
8986 ha->instance);
8988 return;
/* Wider formats: walk every device hash bucket and match by mask. */
8991 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
8992 for (link = ha->dev[index].first; link != NULL;
8993 link = link->next) {
8994 tq = link->base_address;
8996 switch (af->aff_format) {
8997 case FC_RSCN_FABRIC_ADDRESS:
8998 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
8999 EL(ha, "SD_RSCN_RCVD %xh RFA\n",
9000 tq->d_id.b24);
9001 DEVICE_QUEUE_LOCK(tq);
9002 tq->flags |= TQF_RSCN_RCVD;
9003 DEVICE_QUEUE_UNLOCK(tq);
9005 break;
9007 case FC_RSCN_AREA_ADDRESS:
9008 if ((tq->d_id.b24 & 0xffff00) == af->aff_d_id) {
9009 EL(ha, "SD_RSCN_RCVD %xh RAA\n",
9010 tq->d_id.b24);
9011 DEVICE_QUEUE_LOCK(tq);
9012 tq->flags |= TQF_RSCN_RCVD;
9013 DEVICE_QUEUE_UNLOCK(tq);
9015 break;
9017 case FC_RSCN_DOMAIN_ADDRESS:
9018 if ((tq->d_id.b24 & 0xff0000) == af->aff_d_id) {
9019 EL(ha, "SD_RSCN_RCVD %xh RDA\n",
9020 tq->d_id.b24);
9021 DEVICE_QUEUE_LOCK(tq);
9022 tq->flags |= TQF_RSCN_RCVD;
9023 DEVICE_QUEUE_UNLOCK(tq);
9025 break;
9027 default:
9028 break;
9032 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9036 * ql_process_rscn
9038 * Input:
9039 * ha: adapter state pointer.
9040 * af: RSCN payload pointer.
9042 * Context:
9043 * Kernel context.
9045 static int
9046 ql_process_rscn(ql_adapter_state_t *ha, fc_affected_id_t *af)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Applies ql_process_rscn_for_device() to every target matched by the
 * RSCN address format.  Returns 1 if the RSCN may be sent up now; 0 if any
 * matched device still has work outstanding (sendup is sticky-zero).
 */
9048 int sendit;
9049 int sendup = 1;
9050 ql_link_t *link;
9051 uint16_t index;
9052 ql_tgt_t *tq;
9054 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Single-port RSCN: only the one matching target decides. */
9056 if (af->aff_format == FC_RSCN_PORT_ADDRESS) {
9057 port_id_t d_id;
9059 d_id.r.rsvd_1 = 0;
9060 d_id.b24 = af->aff_d_id;
9062 tq = ql_d_id_to_queue(ha, d_id);
9063 if (tq) {
9064 sendup = ql_process_rscn_for_device(ha, tq);
9067 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9069 return (sendup);
/* Wider formats: walk all device hash buckets; any deferral wins. */
9072 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9073 for (link = ha->dev[index].first; link != NULL;
9074 link = link->next) {
9076 tq = link->base_address;
9077 if (tq == NULL) {
9078 continue;
9081 switch (af->aff_format) {
9082 case FC_RSCN_FABRIC_ADDRESS:
9083 if (!RESERVED_LOOP_ID(ha, tq->loop_id)) {
9084 sendit = ql_process_rscn_for_device(
9085 ha, tq);
9086 if (sendup) {
9087 sendup = sendit;
9090 break;
9092 case FC_RSCN_AREA_ADDRESS:
9093 if ((tq->d_id.b24 & 0xffff00) ==
9094 af->aff_d_id) {
9095 sendit = ql_process_rscn_for_device(
9096 ha, tq);
9098 if (sendup) {
9099 sendup = sendit;
9102 break;
9104 case FC_RSCN_DOMAIN_ADDRESS:
9105 if ((tq->d_id.b24 & 0xff0000) ==
9106 af->aff_d_id) {
9107 sendit = ql_process_rscn_for_device(
9108 ha, tq);
9110 if (sendup) {
9111 sendup = sendit;
9114 break;
9116 default:
9117 break;
9122 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9124 return (sendup);
9128 * ql_process_rscn_for_device
9130 * Input:
9131 * ha: adapter state pointer.
9132 * tq: target queue pointer.
9134 * Context:
9135 * Kernel context.
9137 static int
9138 ql_process_rscn_for_device(ql_adapter_state_t *ha, ql_tgt_t *tq)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Per-device RSCN handling: FCP-2 retry-capable targets get an ADISC via
 * a port-database refresh; others are aborted and marked for
 * re-authentication.  Returns 1 to send the RSCN up now, 0 to defer.
 */
9140 int sendup = 1;
9142 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9144 DEVICE_QUEUE_LOCK(tq);
9147 * Let FCP-2 compliant devices continue I/Os
9148 * with their low level recoveries.
9150 if (((tq->flags & TQF_INITIATOR_DEVICE) == 0) &&
9151 (tq->prli_svc_param_word_3 & PRLI_W3_RETRY)) {
9153 * Cause ADISC to go out
9155 DEVICE_QUEUE_UNLOCK(tq);
9157 (void) ql_get_port_database(ha, tq, PDF_NONE);
9159 DEVICE_QUEUE_LOCK(tq);
9160 tq->flags &= ~TQF_RSCN_RCVD;
9162 } else if (tq->loop_id != PORT_NO_LOOP_ID) {
9163 if (tq->d_id.b24 != BROADCAST_ADDR) {
9164 tq->flags |= TQF_NEED_AUTHENTICATION;
9167 DEVICE_QUEUE_UNLOCK(tq);
/* Abort the device's commands; defer sendup while any remain outstanding. */
9169 (void) ql_abort_device(ha, tq, 1);
9171 DEVICE_QUEUE_LOCK(tq);
9173 if (tq->outcnt) {
9174 sendup = 0;
9175 } else {
9176 tq->flags &= ~TQF_RSCN_RCVD;
9178 } else {
9179 tq->flags &= ~TQF_RSCN_RCVD;
9182 if (sendup) {
9183 if (tq->d_id.b24 != BROADCAST_ADDR) {
9184 tq->flags |= TQF_NEED_AUTHENTICATION;
9188 DEVICE_QUEUE_UNLOCK(tq);
9190 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9192 return (sendup);
9195 static int
9196 ql_handle_rscn_update(ql_adapter_state_t *ha)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * After an RSCN: fetches the firmware's device-ID list, creates target
 * queues for newly discovered devices, refreshes their port databases, and
 * emulates a PLOGI up to the transport for each valid new device.
 * Returns a ql local status code.
 */
9198 int rval;
9199 ql_tgt_t *tq;
9200 uint16_t index, loop_id;
9201 ql_dev_id_list_t *list;
9202 uint32_t list_size;
9203 port_id_t d_id;
9204 ql_mbx_data_t mr;
9205 ql_head_t done_q = { NULL, NULL };
9207 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9209 list_size = sizeof (ql_dev_id_list_t) * DEVICE_LIST_ENTRIES;
9210 list = kmem_zalloc(list_size, KM_SLEEP);
/* NOTE(review): kmem_zalloc(KM_SLEEP) never returns NULL; dead check. */
9211 if (list == NULL) {
9212 rval = QL_MEMORY_ALLOC_FAILED;
9213 EL(ha, "kmem_zalloc failed=%xh\n", rval);
9214 return (rval);
9218 * Get data from RISC code d_id list to init each device queue.
9220 rval = ql_get_id_list(ha, (caddr_t)list, list_size, &mr);
9221 if (rval != QL_SUCCESS) {
9222 kmem_free(list, list_size);
9223 EL(ha, "get_id_list failed=%xh\n", rval);
9224 return (rval);
9227 /* Acquire adapter state lock. */
9228 ADAPTER_STATE_LOCK(ha);
9230 /* Check for new devices */
9231 for (index = 0; index < mr.mb[1]; index++) {
9232 ql_dev_list(ha, list, index, &d_id, &loop_id);
9234 if (VALID_DEVICE_ID(ha, loop_id)) {
9235 d_id.r.rsvd_1 = 0;
/* Already have a queue for this d_id: not a new device. */
9237 tq = ql_d_id_to_queue(ha, d_id);
9238 if (tq != NULL) {
9239 continue;
9242 tq = ql_dev_init(ha, d_id, loop_id);
9244 /* Test for fabric device. */
9245 if (d_id.b.domain != ha->d_id.b.domain ||
9246 d_id.b.area != ha->d_id.b.area) {
9247 tq->flags |= TQF_FABRIC_DEVICE;
/* Port database fetch is a mailbox op: drop the state lock around it. */
9250 ADAPTER_STATE_UNLOCK(ha);
9251 if (ql_get_port_database(ha, tq, PDF_NONE) !=
9252 QL_SUCCESS) {
9253 tq->loop_id = PORT_NO_LOOP_ID;
9255 ADAPTER_STATE_LOCK(ha);
9258 * Send up a PLOGI about the new device
9260 if (VALID_DEVICE_ID(ha, tq->loop_id)) {
9261 (void) ql_send_plogi(ha, tq, &done_q);
9266 /* Release adapter state lock. */
9267 ADAPTER_STATE_UNLOCK(ha);
9269 if (done_q.first != NULL) {
9270 ql_done(done_q.first);
9273 kmem_free(list, list_size);
9275 if (rval != QL_SUCCESS) {
9276 EL(ha, "failed=%xh\n", rval);
9277 } else {
9278 /*EMPTY*/
9279 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9282 return (rval);
9286 * ql_free_unsolicited_buffer
9287 * Frees allocated buffer.
9289 * Input:
9290 * ha = adapter state pointer.
9291 * index = buffer array index.
9292 * ADAPTER_STATE_LOCK must be already obtained.
9294 * Context:
9295 * Kernel context.
9297 static void
9298 ql_free_unsolicited_buffer(ql_adapter_state_t *ha, fc_unsol_buf_t *ubp)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 * (Header comment says "index" but the parameter is the buffer pointer.)
 */
9300 ql_srb_t *sp;
9301 int status;
9303 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9305 sp = ubp->ub_fca_private;
9306 if (sp->ub_type == FC_TYPE_IS8802_SNAP) {
9307 /* Disconnect IP from system buffers. */
9308 if (ha->flags & IP_INITIALIZED) {
/* ql_shutdown_ip presumably uses mailbox/IO: drop the state lock around it. */
9309 ADAPTER_STATE_UNLOCK(ha);
9310 status = ql_shutdown_ip(ha);
9311 ADAPTER_STATE_LOCK(ha);
9312 if (status != QL_SUCCESS) {
9313 cmn_err(CE_WARN,
9314 "!Qlogic %s(%d): Failed to shutdown IP",
9315 QL_NAME, ha->instance);
9316 return;
9319 ha->flags &= ~IP_ENABLED;
/* IP buffers use DMA memory; others are plain kmem allocations. */
9322 ql_free_phys(ha, &sp->ub_buffer);
9323 } else {
9324 kmem_free(ubp->ub_buffer, ubp->ub_bufsize);
9327 kmem_free(sp, sizeof (ql_srb_t));
9328 kmem_free(ubp, sizeof (fc_unsol_buf_t));
9330 if (ha->ub_allocated != 0) {
9331 ha->ub_allocated--;
9334 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9338 * ql_get_unsolicited_buffer
9339 * Locates a free unsolicited buffer.
9341 * Input:
9342 * ha = adapter state pointer.
9343 * type = buffer type.
9345 * Returns:
9346 * Unsolicited buffer pointer.
9348 * Context:
9349 * Interrupt or Kernel context, no mailbox commands allowed.
9351 fc_unsol_buf_t *
9352 ql_get_unsolicited_buffer(ql_adapter_state_t *ha, uint32_t type)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Linear scan of ub_array under QL_UB_LOCK for a buffer of the requested
 * type that is FCA-owned and not busy; marks it SRB_UB_ACQUIRED.  Returns
 * NULL when none is available.
 */
9354 fc_unsol_buf_t *ubp;
9355 ql_srb_t *sp;
9356 uint16_t index;
9358 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9360 /* Locate a buffer to use. */
9361 ubp = NULL;
9363 QL_UB_LOCK(ha);
9364 for (index = 0; index < QL_UB_LIMIT; index++) {
9365 ubp = ha->ub_array[index];
9366 if (ubp != NULL) {
9367 sp = ubp->ub_fca_private;
/* Eligible: right type, owned by FCA, and not callback/free/acquired. */
9368 if ((sp->ub_type == type) &&
9369 (sp->flags & SRB_UB_IN_FCA) &&
9370 (!(sp->flags & (SRB_UB_CALLBACK |
9371 SRB_UB_FREE_REQUESTED | SRB_UB_ACQUIRED)))) {
9372 sp->flags |= SRB_UB_ACQUIRED;
9373 ubp->ub_resp_flags = 0;
9374 break;
9376 ubp = NULL;
9379 QL_UB_UNLOCK(ha);
9381 if (ubp) {
9382 ubp->ub_resp_token = NULL;
9383 ubp->ub_class = FC_TRAN_CLASS3;
9386 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9388 return (ubp);
9392 * ql_ub_frame_hdr
9393 * Processes received unsolicited buffers from ISP.
9395 * Input:
9396 * ha: adapter state pointer.
9397 * tq: target queue pointer.
9398 * index: unsolicited buffer array index.
9399 * done_q: done queue pointer.
9401 * Returns:
9402 * ql local function return status code.
9404 * Context:
9405 * Interrupt or Kernel context, no mailbox commands allowed.
9408 ql_ub_frame_hdr(ql_adapter_state_t *ha, ql_tgt_t *tq, uint16_t index,
9409 ql_head_t *done_q)
/*
 * NOTE(review): extraction elided blank lines, closing braces, and this
 * function's return-type line; code kept byte-identical, comments only.
 *
 * Synthesizes an FC frame header for an IP (8802 SNAP) unsolicited buffer
 * received from the ISP, advancing the target queue's sequence-reassembly
 * state (seq_cnt, frame_ro, remaining sequence length), then queues the
 * buffer for callback.
 */
9411 fc_unsol_buf_t *ubp;
9412 ql_srb_t *sp;
9413 uint16_t loop_id;
9414 int rval = QL_FUNCTION_FAILED;
9416 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9418 QL_UB_LOCK(ha);
9419 if (index >= QL_UB_LIMIT || (ubp = ha->ub_array[index]) == NULL) {
9420 EL(ha, "Invalid buffer index=%xh\n", index);
9421 QL_UB_UNLOCK(ha);
9422 return (rval);
9425 sp = ubp->ub_fca_private;
/* Buffer is being freed: hand ownership back to the FCA and bail. */
9426 if (sp->flags & SRB_UB_FREE_REQUESTED) {
9427 EL(ha, "buffer freed index=%xh\n", index);
9428 sp->flags &= ~(SRB_UB_IN_ISP | SRB_UB_CALLBACK |
9429 SRB_UB_RSCN | SRB_UB_FCP | SRB_UB_ACQUIRED);
9431 sp->flags |= SRB_UB_IN_FCA;
9433 QL_UB_UNLOCK(ha);
9434 return (rval);
/* Sanity: handle matches index, buffer is in ISP, is IP, and not busy. */
9437 if ((sp->handle == index) &&
9438 (sp->flags & SRB_UB_IN_ISP) &&
9439 (sp->ub_type == FC_TYPE_IS8802_SNAP) &&
9440 (!(sp->flags & SRB_UB_ACQUIRED))) {
9441 /* set broadcast D_ID */
9442 loop_id = (uint16_t)(CFG_IST(ha, CFG_CTRL_24258081) ?
9443 BROADCAST_24XX_HDL : IP_BROADCAST_LOOP_ID);
9444 if (tq->ub_loop_id == loop_id) {
9445 if (ha->topology & QL_FL_PORT) {
9446 ubp->ub_frame.d_id = 0x000000;
9447 } else {
9448 ubp->ub_frame.d_id = 0xffffff;
9450 } else {
9451 ubp->ub_frame.d_id = ha->d_id.b24;
9453 ubp->ub_frame.r_ctl = R_CTL_UNSOL_DATA;
9454 ubp->ub_frame.rsvd = 0;
9455 ubp->ub_frame.s_id = tq->d_id.b24;
9456 ubp->ub_frame.type = FC_TYPE_IS8802_SNAP;
9457 ubp->ub_frame.seq_cnt = tq->ub_seq_cnt;
9458 ubp->ub_frame.df_ctl = 0;
9459 ubp->ub_frame.seq_id = tq->ub_seq_id;
9460 ubp->ub_frame.rx_id = 0xffff;
9461 ubp->ub_frame.ox_id = 0xffff;
/* Frame carries at most the remaining sequence length. */
9462 ubp->ub_bufsize = sp->ub_size < tq->ub_sequence_length ?
9463 sp->ub_size : tq->ub_sequence_length;
9464 ubp->ub_frame.ro = tq->ub_frame_ro;
9466 tq->ub_sequence_length = (uint16_t)
9467 (tq->ub_sequence_length - ubp->ub_bufsize);
9468 tq->ub_frame_ro += ubp->ub_bufsize;
9469 tq->ub_seq_cnt++;
/* First/last-of-sequence f_ctl bits; 0x20 df_ctl on first frame only. */
9471 if (tq->ub_seq_cnt == tq->ub_total_seg_cnt) {
9472 if (tq->ub_seq_cnt == 1) {
9473 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9474 F_CTL_FIRST_SEQ | F_CTL_END_SEQ;
9475 } else {
9476 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9477 F_CTL_END_SEQ;
9479 tq->ub_total_seg_cnt = 0;
9480 } else if (tq->ub_seq_cnt == 1) {
9481 ubp->ub_frame.f_ctl = F_CTL_RO_PRESENT |
9482 F_CTL_FIRST_SEQ;
9483 ubp->ub_frame.df_ctl = 0x20;
9486 QL_PRINT_3(CE_CONT, "(%d): ub_frame.d_id=%xh\n",
9487 ha->instance, ubp->ub_frame.d_id);
9488 QL_PRINT_3(CE_CONT, "(%d): ub_frame.s_id=%xh\n",
9489 ha->instance, ubp->ub_frame.s_id);
9490 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_cnt=%xh\n",
9491 ha->instance, ubp->ub_frame.seq_cnt);
9492 QL_PRINT_3(CE_CONT, "(%d): ub_frame.seq_id=%xh\n",
9493 ha->instance, ubp->ub_frame.seq_id);
9494 QL_PRINT_3(CE_CONT, "(%d): ub_frame.ro=%xh\n",
9495 ha->instance, ubp->ub_frame.ro);
9496 QL_PRINT_3(CE_CONT, "(%d): ub_frame.f_ctl=%xh\n",
9497 ha->instance, ubp->ub_frame.f_ctl);
9498 QL_PRINT_3(CE_CONT, "(%d): ub_bufsize=%xh\n",
9499 ha->instance, ubp->ub_bufsize);
9500 QL_DUMP_3(ubp->ub_buffer, 8,
9501 ubp->ub_bufsize < 64 ? ubp->ub_bufsize : 64);
9503 sp->flags |= SRB_UB_CALLBACK | SRB_UB_ACQUIRED;
9504 ql_add_link_b(done_q, &sp->cmd);
9505 rval = QL_SUCCESS;
9506 } else {
/* Diagnostics: report exactly which sanity condition failed. */
9507 if (sp->handle != index) {
9508 EL(ha, "Bad index=%xh, expect=%xh\n", index,
9509 sp->handle);
9511 if ((sp->flags & SRB_UB_IN_ISP) == 0) {
9512 EL(ha, "buffer was already in driver, index=%xh\n",
9513 index);
9515 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) == 0) {
9516 EL(ha, "buffer was not an IP buffer, index=%xh\n",
9517 index);
9519 if (sp->flags & SRB_UB_ACQUIRED) {
9520 EL(ha, "buffer was being used by driver, index=%xh\n",
9521 index);
9524 QL_UB_UNLOCK(ha);
9526 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9528 return (rval);
9532 * ql_timer
9533 * One second timer function.
9535 * Input:
9536 * ql_hba.first = first link in adapter list.
9538 * Context:
9539 * Interrupt context, no mailbox commands allowed.
9541 static void
9542 ql_timer(void *arg)
/*
 * NOTE(review): extraction elided blank lines and closing braces (original
 * line numbers embedded at left); code kept byte-identical, comments only.
 *
 * Walks every adapter once per second under GLOBAL_STATE_LOCK, decrementing
 * the various per-adapter timers (port retry, loop down, watchdog, idle,
 * send-PLOGI, IDC) and accumulating set_flags/reset_flags that are handed
 * to the task daemon in a single ql_awaken_task_daemon() call.  Reschedules
 * itself via timeout() unless ql_timer_timeout_id was cleared (stop).
 */
9544 ql_link_t *link;
9545 uint32_t set_flags;
9546 uint32_t reset_flags;
9547 ql_adapter_state_t *ha = NULL, *vha;
9549 QL_PRINT_6(CE_CONT, "started\n");
9551 /* Acquire global state lock. */
9552 GLOBAL_STATE_LOCK();
/* NULL timeout id means the timer is being torn down: do nothing. */
9553 if (ql_timer_timeout_id == NULL) {
9554 /* Release global state lock. */
9555 GLOBAL_STATE_UNLOCK();
9556 return;
9559 for (link = ql_hba.first; link != NULL; link = link->next) {
9560 ha = link->base_address;
9562 /* Skip adapter if suspended of stalled. */
9563 ADAPTER_STATE_LOCK(ha);
9564 if (ha->flags & ADAPTER_SUSPENDED ||
9565 ha->task_daemon_flags & DRIVER_STALL) {
9566 ADAPTER_STATE_UNLOCK(ha);
9567 continue;
9569 ha->flags |= ADAPTER_TIMER_BUSY;
9570 ADAPTER_STATE_UNLOCK(ha);
/* Skip adapters that are powered down; ha->busy pins full power. */
9572 QL_PM_LOCK(ha);
9573 if (ha->power_level != PM_LEVEL_D0) {
9574 QL_PM_UNLOCK(ha);
9576 ADAPTER_STATE_LOCK(ha);
9577 ha->flags &= ~ADAPTER_TIMER_BUSY;
9578 ADAPTER_STATE_UNLOCK(ha);
9579 continue;
9581 ha->busy++;
9582 QL_PM_UNLOCK(ha);
9584 set_flags = 0;
9585 reset_flags = 0;
9587 /* Port retry timer handler. */
9588 if (LOOP_READY(ha)) {
9589 ADAPTER_STATE_LOCK(ha);
9590 if (ha->port_retry_timer != 0) {
9591 ha->port_retry_timer--;
9592 if (ha->port_retry_timer == 0) {
9593 set_flags |= PORT_RETRY_NEEDED;
9596 ADAPTER_STATE_UNLOCK(ha);
9599 /* Loop down timer handler. */
9600 if (LOOP_RECONFIGURE(ha) == 0) {
9601 if (ha->loop_down_timer > LOOP_DOWN_TIMER_END) {
9602 ha->loop_down_timer--;
9604 * give the firmware loop down dump flag
9605 * a chance to work.
9607 if (ha->loop_down_timer == LOOP_DOWN_RESET) {
9608 if (CFG_IST(ha,
9609 CFG_DUMP_LOOP_OFFLINE_TIMEOUT)) {
9610 (void) ql_binary_fw_dump(ha,
9611 TRUE);
9613 EL(ha, "loop_down_reset, "
9614 "isp_abort_needed\n");
9615 set_flags |= ISP_ABORT_NEEDED;
9618 if (CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) {
9619 /* Command abort time handler. */
9620 if (ha->loop_down_timer ==
9621 ha->loop_down_abort_time) {
9622 ADAPTER_STATE_LOCK(ha);
9623 ha->flags |= ABORT_CMDS_LOOP_DOWN_TMO;
9624 ADAPTER_STATE_UNLOCK(ha);
9625 set_flags |= ABORT_QUEUES_NEEDED;
9626 EL(ha, "loop_down_abort_time, "
9627 "abort_queues_needed\n");
9630 /* Watchdog timer handler. */
9631 if (ha->watchdog_timer == 0) {
9632 ha->watchdog_timer = WATCHDOG_TIME;
9633 } else if (LOOP_READY(ha)) {
9634 ha->watchdog_timer--;
9635 if (ha->watchdog_timer == 0) {
/* Run the watchdog over the physical port and every vport. */
9636 for (vha = ha; vha != NULL;
9637 vha = vha->vp_next) {
9638 ql_watchdog(vha,
9639 &set_flags,
9640 &reset_flags);
9642 ha->watchdog_timer =
9643 WATCHDOG_TIME;
9649 /* Idle timer handler. */
9650 if (!DRIVER_SUSPENDED(ha)) {
9651 if (++ha->idle_timer >= IDLE_CHECK_TIMER) {
9652 #if defined(QL_DEBUG_LEVEL_6) || !defined(QL_DEBUG_LEVEL_3)
9653 set_flags |= TASK_DAEMON_IDLE_CHK_FLG;
9654 #endif
9655 ha->idle_timer = 0;
9657 if (ha->send_plogi_timer != 0) {
9658 ha->send_plogi_timer--;
9659 if (ha->send_plogi_timer == 0) {
9660 set_flags |= SEND_PLOGI;
9664 ADAPTER_STATE_LOCK(ha);
/* IDC (inter-driver communication) restart / flash-access timers. */
9665 if (ha->idc_restart_timer != 0) {
9666 ha->idc_restart_timer--;
9667 if (ha->idc_restart_timer == 0) {
9668 ha->idc_restart_cnt = 0;
9669 reset_flags |= DRIVER_STALL;
9672 if (ha->idc_flash_acc_timer != 0) {
9673 ha->idc_flash_acc_timer--;
9674 if (ha->idc_flash_acc_timer == 0 &&
9675 ha->idc_flash_acc != 0) {
9676 ha->idc_flash_acc = 1;
9677 ha->idc_mb[0] = MBA_IDC_NOTIFICATION;
9678 ha->idc_mb[1] = 0;
9679 ha->idc_mb[2] = IDC_OPC_DRV_START;
9680 set_flags |= IDC_EVENT;
9683 ADAPTER_STATE_UNLOCK(ha);
9685 if (set_flags != 0 || reset_flags != 0) {
9686 ql_awaken_task_daemon(ha, NULL, set_flags,
9687 reset_flags);
9690 if (ha->xioctl->ledstate.BeaconState == BEACON_ON) {
9691 ql_blink_led(ha);
9694 /* Update the IO stats */
9695 if (ha->xioctl->IOInputByteCnt >= 0x100000) {
9696 ha->xioctl->IOInputMByteCnt +=
9697 (ha->xioctl->IOInputByteCnt / 0x100000);
9698 ha->xioctl->IOInputByteCnt %= 0x100000;
9701 if (ha->xioctl->IOOutputByteCnt >= 0x100000) {
9702 ha->xioctl->IOOutputMByteCnt +=
9703 (ha->xioctl->IOOutputByteCnt / 0x100000);
9704 ha->xioctl->IOOutputByteCnt %= 0x100000;
9707 if (CFG_IST(ha, CFG_CTRL_8021)) {
9708 (void) ql_8021_idc_handler(ha);
9711 ADAPTER_STATE_LOCK(ha);
9712 ha->flags &= ~ADAPTER_TIMER_BUSY;
9713 ADAPTER_STATE_UNLOCK(ha);
9715 QL_PM_LOCK(ha);
9716 ha->busy--;
9717 QL_PM_UNLOCK(ha);
9720 /* Restart timer, if not being stopped. */
9721 if (ql_timer_timeout_id != NULL) {
9722 ql_timer_timeout_id = timeout(ql_timer, arg, ql_timer_ticks);
9725 /* Release global state lock. */
9726 GLOBAL_STATE_UNLOCK();
9728 QL_PRINT_6(CE_CONT, "done\n");
9732 * ql_timeout_insert
9733 * Function used to insert a command block onto the
9734 * watchdog timer queue.
9736 * Note: Must insure that pkt_time is not zero
9737 * before calling ql_timeout_insert.
9739 * Input:
9740 * ha: adapter state pointer.
9741 * tq: target queue pointer.
9742 * sp: SRB pointer.
9743 * DEVICE_QUEUE_LOCK must be already obtained.
9745 * Context:
9746 * Kernel context.
9748 /* ARGSUSED */
9749 static void
9750 ql_timeout_insert(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp)
9752 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9754 if (sp->pkt->pkt_timeout != 0 && sp->pkt->pkt_timeout < 0x10000) {
9755 sp->isp_timeout = (uint16_t)(sp->pkt->pkt_timeout);
9757 * The WATCHDOG_TIME must be rounded up + 1. As an example,
9758 * consider a 1 second timeout. If the WATCHDOG_TIME is 1, it
9759 * will expire in the next watchdog call, which could be in
9760 * 1 microsecond.
9763 sp->wdg_q_time = (sp->isp_timeout + WATCHDOG_TIME - 1) /
9764 WATCHDOG_TIME;
9766 * Added an additional 10 to account for the
9767 * firmware timer drift which can occur with
9768 * very long timeout values.
9770 sp->wdg_q_time += 10;
9773 * Add 6 more to insure watchdog does not timeout at the same
9774 * time as ISP RISC code timeout.
9776 sp->wdg_q_time += 6;
9778 /* Save initial time for resetting watchdog time. */
9779 sp->init_wdg_q_time = sp->wdg_q_time;
9781 /* Insert command onto watchdog queue. */
9782 ql_add_link_b(&tq->wdg, &sp->wdg);
9784 sp->flags |= SRB_WATCHDOG_ENABLED;
9785 } else {
9786 sp->isp_timeout = 0;
9787 sp->wdg_q_time = 0;
9788 sp->init_wdg_q_time = 0;
9791 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
9795 * ql_watchdog
9796 * Timeout handler that runs in interrupt context. The
9797 * ql_adapter_state_t * argument is the parameter set up when the
9798 * timeout was initialized (state structure pointer).
9799 * Function used to update timeout values and if timeout
9800 * has occurred command will be aborted.
9802 * Input:
9803 * ha: adapter state pointer.
9804 * set_flags: task daemon flags to set.
9805 * reset_flags: task daemon flags to reset.
9807 * Context:
9808 * Interrupt context, no mailbox commands allowed.
9810 static void
9811 ql_watchdog(ql_adapter_state_t *ha, uint32_t *set_flags, uint32_t *reset_flags)
9813 ql_srb_t *sp;
9814 ql_link_t *link;
9815 ql_link_t *next_cmd;
9816 ql_link_t *next_device;
9817 ql_tgt_t *tq;
9818 ql_lun_t *lq;
9819 uint16_t index;
9820 int q_sane;
9822 QL_PRINT_6(CE_CONT, "(%d): started\n", ha->instance);
9824 /* Loop through all targets. */
9825 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
9826 for (link = ha->dev[index].first; link != NULL;
9827 link = next_device) {
9828 tq = link->base_address;
9830 /* Try to acquire device queue lock. */
9831 if (TRY_DEVICE_QUEUE_LOCK(tq) == 0) {
9832 next_device = NULL;
9833 continue;
9836 next_device = link->next;
9838 if (!(CFG_IST(ha, CFG_ENABLE_LINK_DOWN_REPORTING)) &&
9839 (tq->port_down_retry_count == 0)) {
9840 /* Release device queue lock. */
9841 DEVICE_QUEUE_UNLOCK(tq);
9842 continue;
9845 /* Find out if this device is in a sane state. */
9846 if (tq->flags & (TQF_RSCN_RCVD |
9847 TQF_NEED_AUTHENTICATION | TQF_QUEUE_SUSPENDED)) {
9848 q_sane = 0;
9849 } else {
9850 q_sane = 1;
9852 /* Loop through commands on watchdog queue. */
9853 for (link = tq->wdg.first; link != NULL;
9854 link = next_cmd) {
9855 next_cmd = link->next;
9856 sp = link->base_address;
9857 lq = sp->lun_queue;
9860 * For SCSI commands, if everything seems to
9861 * be going fine and this packet is stuck
9862 * because of throttling at LUN or target
9863 * level then do not decrement the
9864 * sp->wdg_q_time
9866 if (ha->task_daemon_flags & STATE_ONLINE &&
9867 (sp->flags & SRB_ISP_STARTED) == 0 &&
9868 q_sane && sp->flags & SRB_FCP_CMD_PKT &&
9869 lq->lun_outcnt >= ha->execution_throttle) {
9870 continue;
9873 if (sp->wdg_q_time != 0) {
9874 sp->wdg_q_time--;
9876 /* Timeout? */
9877 if (sp->wdg_q_time != 0) {
9878 continue;
9881 ql_remove_link(&tq->wdg, &sp->wdg);
9882 sp->flags &= ~SRB_WATCHDOG_ENABLED;
9884 if (sp->flags & SRB_ISP_STARTED) {
9885 ql_cmd_timeout(ha, tq, sp,
9886 set_flags, reset_flags);
9888 DEVICE_QUEUE_UNLOCK(tq);
9889 tq = NULL;
9890 next_cmd = NULL;
9891 next_device = NULL;
9892 index = DEVICE_HEAD_LIST_SIZE;
9893 } else {
9894 ql_cmd_timeout(ha, tq, sp,
9895 set_flags, reset_flags);
9900 /* Release device queue lock. */
9901 if (tq != NULL) {
9902 DEVICE_QUEUE_UNLOCK(tq);
9907 QL_PRINT_6(CE_CONT, "(%d): done\n", ha->instance);
9911 * ql_cmd_timeout
9912 * Command timeout handler.
9914 * Input:
9915 * ha: adapter state pointer.
9916 * tq: target queue pointer.
9917 * sp: SRB pointer.
9918 * set_flags: task daemon flags to set.
9919 * reset_flags: task daemon flags to reset.
9921 * Context:
9922 * Interrupt context, no mailbox commands allowed.
9924 /* ARGSUSED */
9925 static void
9926 ql_cmd_timeout(ql_adapter_state_t *ha, ql_tgt_t *tq, ql_srb_t *sp,
9927 uint32_t *set_flags, uint32_t *reset_flags)
9929 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
9931 if (!(sp->flags & SRB_ISP_STARTED)) {
9933 EL(ha, "command timed out in driver = %ph\n", (void *)sp);
9935 REQUEST_RING_LOCK(ha);
9937 /* if it's on a queue */
9938 if (sp->cmd.head) {
9940 * The pending_cmds que needs to be
9941 * protected by the ring lock
9943 ql_remove_link(sp->cmd.head, &sp->cmd);
9945 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
9947 /* Release device queue lock. */
9948 REQUEST_RING_UNLOCK(ha);
9949 DEVICE_QUEUE_UNLOCK(tq);
9951 /* Set timeout status */
9952 sp->pkt->pkt_reason = CS_TIMEOUT;
9954 /* Ensure no retry */
9955 sp->flags &= ~SRB_RETRY;
9957 /* Call done routine to handle completion. */
9958 ql_done(&sp->cmd);
9960 DEVICE_QUEUE_LOCK(tq);
9961 } else if (CFG_IST(ha, CFG_CTRL_8021)) {
9962 int rval;
9963 uint32_t index;
9965 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
9966 "spf=%xh\n", (void *)sp,
9967 (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
9968 sp->handle & OSC_INDEX_MASK, sp->flags);
9970 DEVICE_QUEUE_UNLOCK(tq);
9972 INTR_LOCK(ha);
9973 ha->pha->xioctl->ControllerErrorCount++;
9974 if (sp->handle) {
9975 ha->pha->timeout_cnt++;
9976 index = sp->handle & OSC_INDEX_MASK;
9977 if (ha->pha->outstanding_cmds[index] == sp) {
9978 sp->request_ring_ptr->entry_type =
9979 INVALID_ENTRY_TYPE;
9980 sp->request_ring_ptr->entry_count = 0;
9981 ha->pha->outstanding_cmds[index] = 0;
9983 INTR_UNLOCK(ha);
9985 rval = ql_abort_command(ha, sp);
9986 if (rval == QL_FUNCTION_TIMEOUT ||
9987 rval == QL_LOCK_TIMEOUT ||
9988 rval == QL_FUNCTION_PARAMETER_ERROR ||
9989 ha->pha->timeout_cnt > TIMEOUT_THRESHOLD) {
9990 *set_flags |= ISP_ABORT_NEEDED;
9991 EL(ha, "abort status=%xh, tc=%xh, isp_abort_"
9992 "needed\n", rval, ha->pha->timeout_cnt);
9995 sp->handle = 0;
9996 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
9997 } else {
9998 INTR_UNLOCK(ha);
10001 /* Set timeout status */
10002 sp->pkt->pkt_reason = CS_TIMEOUT;
10004 /* Ensure no retry */
10005 sp->flags &= ~SRB_RETRY;
10007 /* Call done routine to handle completion. */
10008 ql_done(&sp->cmd);
10010 DEVICE_QUEUE_LOCK(tq);
10012 } else {
10013 EL(ha, "command timed out in isp=%ph, osc=%ph, index=%xh, "
10014 "spf=%xh, isp_abort_needed\n", (void *)sp,
10015 (void *)ha->outstanding_cmds[sp->handle & OSC_INDEX_MASK],
10016 sp->handle & OSC_INDEX_MASK, sp->flags);
10018 /* Release device queue lock. */
10019 DEVICE_QUEUE_UNLOCK(tq);
10021 INTR_LOCK(ha);
10022 ha->pha->xioctl->ControllerErrorCount++;
10023 INTR_UNLOCK(ha);
10025 /* Set ISP needs to be reset */
10026 sp->flags |= SRB_COMMAND_TIMEOUT;
10028 if (CFG_IST(ha, CFG_DUMP_DRIVER_COMMAND_TIMEOUT)) {
10029 (void) ql_binary_fw_dump(ha, TRUE);
10032 *set_flags |= ISP_ABORT_NEEDED;
10034 DEVICE_QUEUE_LOCK(tq);
10037 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10041 * ql_rst_aen
10042 * Processes asynchronous reset.
10044 * Input:
10045 * ha = adapter state pointer.
10047 * Context:
10048 * Kernel context.
10050 static void
10051 ql_rst_aen(ql_adapter_state_t *ha)
10053 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10055 /* Issue marker command. */
10056 (void) ql_marker(ha, 0, 0, MK_SYNC_ALL);
10058 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10062 * ql_cmd_wait
10063 * Stall driver until all outstanding commands are returned.
10065 * Input:
10066 * ha = adapter state pointer.
10068 * Context:
10069 * Kernel context.
10071 void
10072 ql_cmd_wait(ql_adapter_state_t *ha)
10074 uint16_t index;
10075 ql_link_t *link;
10076 ql_tgt_t *tq;
10077 ql_adapter_state_t *vha;
10079 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10081 /* Wait for all outstanding commands to be returned. */
10082 (void) ql_wait_outstanding(ha);
10085 * clear out internally queued commands
10087 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10088 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10089 for (link = vha->dev[index].first; link != NULL;
10090 link = link->next) {
10091 tq = link->base_address;
10092 if (tq &&
10093 (!(tq->prli_svc_param_word_3 &
10094 PRLI_W3_RETRY))) {
10095 (void) ql_abort_device(vha, tq, 0);
10101 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10105 * ql_wait_outstanding
10106 * Wait for all outstanding commands to complete.
10108 * Input:
10109 * ha = adapter state pointer.
10111 * Returns:
10112 * index - the index for ql_srb into outstanding_cmds.
10114 * Context:
10115 * Kernel context.
10117 static uint16_t
10118 ql_wait_outstanding(ql_adapter_state_t *ha)
10120 ql_srb_t *sp;
10121 uint16_t index, count;
10123 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10125 count = ql_osc_wait_count;
10126 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10127 if (ha->pha->pending_cmds.first != NULL) {
10128 ql_start_iocb(ha, NULL);
10129 index = 1;
10131 if ((sp = ha->pha->outstanding_cmds[index]) != NULL &&
10132 (sp->flags & SRB_COMMAND_TIMEOUT) == 0) {
10133 if (count-- != 0) {
10134 ql_delay(ha, 10000);
10135 index = 0;
10136 } else {
10137 EL(ha, "failed, sp=%ph, oci=%d, hdl=%xh\n",
10138 (void *)sp, index, sp->handle);
10139 break;
10144 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10146 return (index);
10150 * ql_restart_queues
10151 * Restart device queues.
10153 * Input:
10154 * ha = adapter state pointer.
10155 * DEVICE_QUEUE_LOCK must be released.
10157 * Context:
10158 * Interrupt or Kernel context, no mailbox commands allowed.
10160 static void
10161 ql_restart_queues(ql_adapter_state_t *ha)
10163 ql_link_t *link, *link2;
10164 ql_tgt_t *tq;
10165 ql_lun_t *lq;
10166 uint16_t index;
10167 ql_adapter_state_t *vha;
10169 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10171 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10172 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10173 for (link = vha->dev[index].first; link != NULL;
10174 link = link->next) {
10175 tq = link->base_address;
10177 /* Acquire device queue lock. */
10178 DEVICE_QUEUE_LOCK(tq);
10180 tq->flags &= ~TQF_QUEUE_SUSPENDED;
10182 for (link2 = tq->lun_queues.first;
10183 link2 != NULL; link2 = link2->next) {
10184 lq = link2->base_address;
10186 if (lq->cmd.first != NULL) {
10187 ql_next(vha, lq);
10188 DEVICE_QUEUE_LOCK(tq);
10192 /* Release device queue lock. */
10193 DEVICE_QUEUE_UNLOCK(tq);
10198 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10202 * ql_iidma
10203 * Setup iiDMA parameters to firmware
10205 * Input:
10206 * ha = adapter state pointer.
10207 * DEVICE_QUEUE_LOCK must be released.
10209 * Context:
10210 * Interrupt or Kernel context, no mailbox commands allowed.
10212 static void
10213 ql_iidma(ql_adapter_state_t *ha)
10215 ql_link_t *link;
10216 ql_tgt_t *tq;
10217 uint16_t index;
10218 char buf[256];
10219 uint32_t data;
10221 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10223 if ((CFG_IST(ha, CFG_CTRL_242581)) == 0) {
10224 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10225 return;
10228 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10229 for (link = ha->dev[index].first; link != NULL;
10230 link = link->next) {
10231 tq = link->base_address;
10233 /* Acquire device queue lock. */
10234 DEVICE_QUEUE_LOCK(tq);
10236 if ((tq->flags & TQF_IIDMA_NEEDED) == 0) {
10237 DEVICE_QUEUE_UNLOCK(tq);
10238 continue;
10241 tq->flags &= ~TQF_IIDMA_NEEDED;
10243 if ((tq->loop_id > LAST_N_PORT_HDL) ||
10244 (tq->iidma_rate == IIDMA_RATE_NDEF)) {
10245 DEVICE_QUEUE_UNLOCK(tq);
10246 continue;
10249 /* Get the iiDMA persistent data */
10250 if (tq->iidma_rate == IIDMA_RATE_INIT) {
10251 (void) sprintf(buf,
10252 "iidma-rate-%02x%02x%02x%02x%02x"
10253 "%02x%02x%02x", tq->port_name[0],
10254 tq->port_name[1], tq->port_name[2],
10255 tq->port_name[3], tq->port_name[4],
10256 tq->port_name[5], tq->port_name[6],
10257 tq->port_name[7]);
10259 if ((data = ql_get_prop(ha, buf)) ==
10260 0xffffffff) {
10261 tq->iidma_rate = IIDMA_RATE_NDEF;
10262 } else {
10263 switch (data) {
10264 case IIDMA_RATE_1GB:
10265 case IIDMA_RATE_2GB:
10266 case IIDMA_RATE_4GB:
10267 case IIDMA_RATE_10GB:
10268 tq->iidma_rate = data;
10269 break;
10270 case IIDMA_RATE_8GB:
10271 if (CFG_IST(ha,
10272 CFG_CTRL_25XX)) {
10273 tq->iidma_rate = data;
10274 } else {
10275 tq->iidma_rate =
10276 IIDMA_RATE_4GB;
10278 break;
10279 default:
10280 EL(ha, "invalid data for "
10281 "parameter: %s: %xh\n",
10282 buf, data);
10283 tq->iidma_rate =
10284 IIDMA_RATE_NDEF;
10285 break;
10290 /* Set the firmware's iiDMA rate */
10291 if (tq->iidma_rate <= IIDMA_RATE_MAX &&
10292 !(CFG_IST(ha, CFG_CTRL_8081))) {
10293 data = ql_iidma_rate(ha, tq->loop_id,
10294 &tq->iidma_rate, EXT_IIDMA_MODE_SET);
10295 if (data != QL_SUCCESS) {
10296 EL(ha, "mbx failed: %xh\n", data);
10300 /* Release device queue lock. */
10301 DEVICE_QUEUE_UNLOCK(tq);
10305 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10309 * ql_abort_queues
10310 * Abort all commands on device queues.
10312 * Input:
10313 * ha = adapter state pointer.
10315 * Context:
10316 * Interrupt or Kernel context, no mailbox commands allowed.
10318 static void
10319 ql_abort_queues(ql_adapter_state_t *ha)
10321 ql_link_t *link;
10322 ql_tgt_t *tq;
10323 ql_srb_t *sp;
10324 uint16_t index;
10325 ql_adapter_state_t *vha;
10327 QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10329 /* Return all commands in outstanding command list. */
10330 INTR_LOCK(ha);
10332 /* Place all commands in outstanding cmd list on device queue. */
10333 for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
10334 if (ha->pending_cmds.first != NULL) {
10335 INTR_UNLOCK(ha);
10336 ql_start_iocb(ha, NULL);
10337 /* Delay for system */
10338 ql_delay(ha, 10000);
10339 INTR_LOCK(ha);
10340 index = 1;
10342 sp = ha->outstanding_cmds[index];
10344 /* skip devices capable of FCP2 retrys */
10345 if ((sp != NULL) &&
10346 ((tq = sp->lun_queue->target_queue) != NULL) &&
10347 (!(tq->prli_svc_param_word_3 & PRLI_W3_RETRY))) {
10348 ha->outstanding_cmds[index] = NULL;
10349 sp->handle = 0;
10350 sp->flags &= ~SRB_IN_TOKEN_ARRAY;
10352 INTR_UNLOCK(ha);
10354 /* Set ending status. */
10355 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10356 sp->flags |= SRB_ISP_COMPLETED;
10358 /* Call done routine to handle completions. */
10359 sp->cmd.next = NULL;
10360 ql_done(&sp->cmd);
10362 INTR_LOCK(ha);
10365 INTR_UNLOCK(ha);
10367 for (vha = ha; vha != NULL; vha = vha->vp_next) {
10368 QL_PRINT_10(CE_CONT, "(%d,%d): abort instance\n",
10369 vha->instance, vha->vp_index);
10370 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10371 for (link = vha->dev[index].first; link != NULL;
10372 link = link->next) {
10373 tq = link->base_address;
10374 /* skip devices capable of FCP2 retrys */
10375 if (!(tq->prli_svc_param_word_3 &
10376 PRLI_W3_RETRY)) {
10378 * Set port unavailable status and
10379 * return all commands on a devices
10380 * queues.
10382 ql_abort_device_queues(ha, tq);
10387 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10391 * ql_abort_device_queues
10392 * Abort all commands on device queues.
10394 * Input:
10395 * ha = adapter state pointer.
10397 * Context:
10398 * Interrupt or Kernel context, no mailbox commands allowed.
10400 static void
10401 ql_abort_device_queues(ql_adapter_state_t *ha, ql_tgt_t *tq)
10403 ql_link_t *lun_link, *cmd_link;
10404 ql_srb_t *sp;
10405 ql_lun_t *lq;
10407 QL_PRINT_10(CE_CONT, "(%d): started\n", ha->instance);
10409 DEVICE_QUEUE_LOCK(tq);
10411 for (lun_link = tq->lun_queues.first; lun_link != NULL;
10412 lun_link = lun_link->next) {
10413 lq = lun_link->base_address;
10415 cmd_link = lq->cmd.first;
10416 while (cmd_link != NULL) {
10417 sp = cmd_link->base_address;
10419 if (sp->flags & SRB_ABORT) {
10420 cmd_link = cmd_link->next;
10421 continue;
10424 /* Remove srb from device cmd queue. */
10425 ql_remove_link(&lq->cmd, &sp->cmd);
10427 sp->flags &= ~SRB_IN_DEVICE_QUEUE;
10429 DEVICE_QUEUE_UNLOCK(tq);
10431 /* Set ending status. */
10432 sp->pkt->pkt_reason = CS_PORT_UNAVAILABLE;
10434 /* Call done routine to handle completion. */
10435 ql_done(&sp->cmd);
10437 /* Delay for system */
10438 ql_delay(ha, 10000);
10440 DEVICE_QUEUE_LOCK(tq);
10441 cmd_link = lq->cmd.first;
10444 DEVICE_QUEUE_UNLOCK(tq);
10446 QL_PRINT_10(CE_CONT, "(%d): done\n", ha->instance);
10450 * ql_loop_resync
10451 * Resync with fibre channel devices.
10453 * Input:
10454 * ha = adapter state pointer.
10455 * DEVICE_QUEUE_LOCK must be released.
10457 * Returns:
10458 * ql local function return status code.
10460 * Context:
10461 * Kernel context.
10463 static int
10464 ql_loop_resync(ql_adapter_state_t *ha)
10466 int rval;
10468 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10470 if (ha->flags & IP_INITIALIZED) {
10471 (void) ql_shutdown_ip(ha);
10474 rval = ql_fw_ready(ha, 10);
10476 TASK_DAEMON_LOCK(ha);
10477 ha->task_daemon_flags &= ~LOOP_RESYNC_ACTIVE;
10478 TASK_DAEMON_UNLOCK(ha);
10480 /* Set loop online, if it really is. */
10481 if (rval == QL_SUCCESS) {
10482 ql_loop_online(ha);
10483 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10484 } else {
10485 EL(ha, "failed, rval = %xh\n", rval);
10488 return (rval);
10492 * ql_loop_online
10493 * Set loop online status if it really is online.
10495 * Input:
10496 * ha = adapter state pointer.
10497 * DEVICE_QUEUE_LOCK must be released.
10499 * Context:
10500 * Kernel context.
10502 void
10503 ql_loop_online(ql_adapter_state_t *ha)
10505 ql_adapter_state_t *vha;
10507 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10509 /* Inform the FC Transport that the hardware is online. */
10510 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
10511 if (!(vha->task_daemon_flags &
10512 (LOOP_RESYNC_NEEDED | LOOP_DOWN))) {
10513 /* Restart IP if it was shutdown. */
10514 if (vha->vp_index == 0 && vha->flags & IP_ENABLED &&
10515 !(vha->flags & IP_INITIALIZED)) {
10516 (void) ql_initialize_ip(vha);
10517 ql_isp_rcvbuf(vha);
10520 if (FC_PORT_STATE_MASK(vha->state) != FC_STATE_LOOP &&
10521 FC_PORT_STATE_MASK(vha->state) !=
10522 FC_STATE_ONLINE) {
10523 vha->state = FC_PORT_SPEED_MASK(vha->state);
10524 if (vha->topology & QL_LOOP_CONNECTION) {
10525 vha->state |= FC_STATE_LOOP;
10526 } else {
10527 vha->state |= FC_STATE_ONLINE;
10529 TASK_DAEMON_LOCK(ha);
10530 vha->task_daemon_flags |= FC_STATE_CHANGE;
10531 TASK_DAEMON_UNLOCK(ha);
10536 ql_awaken_task_daemon(ha, NULL, 0, 0);
10538 /* Restart device queues that may have been stopped. */
10539 ql_restart_queues(ha);
10541 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10545 * ql_fca_handle_to_state
10546 * Verifies handle to be correct.
10548 * Input:
10549 * fca_handle = pointer to state structure.
10551 * Returns:
10552 * NULL = failure
10554 * Context:
10555 * Kernel context.
10557 static ql_adapter_state_t *
10558 ql_fca_handle_to_state(opaque_t fca_handle)
10560 #ifdef QL_DEBUG_ROUTINES
10561 ql_link_t *link;
10562 ql_adapter_state_t *ha = NULL;
10563 ql_adapter_state_t *vha = NULL;
10565 for (link = ql_hba.first; link != NULL; link = link->next) {
10566 ha = link->base_address;
10567 for (vha = ha->vp_next; vha != NULL; vha = vha->vp_next) {
10568 if ((opaque_t)vha == fca_handle) {
10569 ha = vha;
10570 break;
10573 if ((opaque_t)ha == fca_handle) {
10574 break;
10575 } else {
10576 ha = NULL;
10580 if (ha == NULL) {
10581 /*EMPTY*/
10582 QL_PRINT_2(CE_CONT, "failed\n");
10585 #endif /* QL_DEBUG_ROUTINES */
10587 return ((ql_adapter_state_t *)fca_handle);
10591 * ql_d_id_to_queue
10592 * Locate device queue that matches destination ID.
10594 * Input:
10595 * ha = adapter state pointer.
10596 * d_id = destination ID
10598 * Returns:
10599 * NULL = failure
10601 * Context:
10602 * Interrupt or Kernel context, no mailbox commands allowed.
10604 ql_tgt_t *
10605 ql_d_id_to_queue(ql_adapter_state_t *ha, port_id_t d_id)
10607 uint16_t index;
10608 ql_tgt_t *tq;
10609 ql_link_t *link;
10611 /* Get head queue index. */
10612 index = ql_alpa_to_index[d_id.b.al_pa];
10614 for (link = ha->dev[index].first; link != NULL; link = link->next) {
10615 tq = link->base_address;
10616 if (tq->d_id.b24 == d_id.b24 &&
10617 VALID_DEVICE_ID(ha, tq->loop_id)) {
10618 return (tq);
10622 return (NULL);
10626 * ql_loop_id_to_queue
10627 * Locate device queue that matches loop ID.
10629 * Input:
10630 * ha: adapter state pointer.
10631 * loop_id: destination ID
10633 * Returns:
10634 * NULL = failure
10636 * Context:
10637 * Interrupt or Kernel context, no mailbox commands allowed.
10639 ql_tgt_t *
10640 ql_loop_id_to_queue(ql_adapter_state_t *ha, uint16_t loop_id)
10642 uint16_t index;
10643 ql_tgt_t *tq;
10644 ql_link_t *link;
10646 for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
10647 for (link = ha->dev[index].first; link != NULL;
10648 link = link->next) {
10649 tq = link->base_address;
10650 if (tq->loop_id == loop_id) {
10651 return (tq);
10656 return (NULL);
10660 * ql_kstat_update
10661 * Updates kernel statistics.
10663 * Input:
10664 * ksp - driver kernel statistics structure pointer.
10665 * rw - function to perform
10667 * Returns:
10668 * 0 or EACCES
10670 * Context:
10671 * Kernel context.
10673 /* ARGSUSED */
10674 static int
10675 ql_kstat_update(kstat_t *ksp, int rw)
10677 int rval;
10679 QL_PRINT_3(CE_CONT, "started\n");
10681 if (rw == KSTAT_WRITE) {
10682 rval = EACCES;
10683 } else {
10684 rval = 0;
10687 if (rval != 0) {
10688 /*EMPTY*/
10689 QL_PRINT_2(CE_CONT, "failed, rval = %xh\n", rval);
10690 } else {
10691 /*EMPTY*/
10692 QL_PRINT_3(CE_CONT, "done\n");
10694 return (rval);
10698 * ql_load_flash
10699 * Loads flash.
10701 * Input:
10702 * ha: adapter state pointer.
10703 * dp: data pointer.
10704 * size: data length.
10706 * Returns:
10707 * ql local function return status code.
10709 * Context:
10710 * Kernel context.
10713 ql_load_flash(ql_adapter_state_t *ha, uint8_t *dp, uint32_t size)
10715 uint32_t cnt;
10716 int rval;
10717 uint32_t size_to_offset;
10718 uint32_t size_to_compare;
10719 int erase_all;
10721 if (CFG_IST(ha, CFG_CTRL_24258081)) {
10722 return (ql_24xx_load_flash(ha, dp, size, 0));
10725 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10727 size_to_compare = 0x20000;
10728 size_to_offset = 0;
10729 erase_all = 0;
10730 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10731 if (size == 0x80000) {
10732 /* Request to flash the entire chip. */
10733 size_to_compare = 0x80000;
10734 erase_all = 1;
10735 } else {
10736 size_to_compare = 0x40000;
10737 if (ql_flash_sbus_fpga) {
10738 size_to_offset = 0x40000;
10742 if (size > size_to_compare) {
10743 rval = QL_FUNCTION_PARAMETER_ERROR;
10744 EL(ha, "failed=%xh\n", rval);
10745 return (rval);
10748 GLOBAL_HW_LOCK();
10750 /* Enable Flash Read/Write. */
10751 ql_flash_enable(ha);
10753 /* Erase flash prior to write. */
10754 rval = ql_erase_flash(ha, erase_all);
10756 if (rval == QL_SUCCESS) {
10757 /* Write data to flash. */
10758 for (cnt = 0; cnt < size; cnt++) {
10759 /* Allow other system activity. */
10760 if (cnt % 0x1000 == 0) {
10761 ql_delay(ha, 10000);
10763 rval = ql_program_flash_address(ha,
10764 cnt + size_to_offset, *dp++);
10765 if (rval != QL_SUCCESS) {
10766 break;
10771 ql_flash_disable(ha);
10773 GLOBAL_HW_UNLOCK();
10775 if (rval != QL_SUCCESS) {
10776 EL(ha, "failed=%xh\n", rval);
10777 } else {
10778 /*EMPTY*/
10779 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10781 return (rval);
10785 * ql_program_flash_address
10786 * Program flash address.
10788 * Input:
10789 * ha = adapter state pointer.
10790 * addr = flash byte address.
10791 * data = data to be written to flash.
10793 * Returns:
10794 * ql local function return status code.
10796 * Context:
10797 * Kernel context.
10799 static int
10800 ql_program_flash_address(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
10802 int rval;
10804 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10806 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10807 ql_write_flash_byte(ha, 0x5555, 0xa0);
10808 ql_write_flash_byte(ha, addr, data);
10809 } else {
10810 /* Write Program Command Sequence */
10811 ql_write_flash_byte(ha, 0x5555, 0xaa);
10812 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10813 ql_write_flash_byte(ha, 0x5555, 0xa0);
10814 ql_write_flash_byte(ha, addr, data);
10817 /* Wait for write to complete. */
10818 rval = ql_poll_flash(ha, addr, data);
10820 if (rval != QL_SUCCESS) {
10821 EL(ha, "failed=%xh\n", rval);
10822 } else {
10823 /*EMPTY*/
10824 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10826 return (rval);
10830 * ql_erase_flash
10831 * Erases entire flash.
10833 * Input:
10834 * ha = adapter state pointer.
10836 * Returns:
10837 * ql local function return status code.
10839 * Context:
10840 * Kernel context.
10843 ql_erase_flash(ql_adapter_state_t *ha, int erase_all)
10845 int rval;
10846 uint32_t erase_delay = 2000000;
10847 uint32_t sStartAddr;
10848 uint32_t ssize;
10849 uint32_t cnt;
10850 uint8_t *bfp;
10851 uint8_t *tmp;
10853 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10855 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10857 if (ql_flash_sbus_fpga == 1) {
10858 ssize = QL_SBUS_FCODE_SIZE;
10859 sStartAddr = QL_FCODE_OFFSET;
10860 } else {
10861 ssize = QL_FPGA_SIZE;
10862 sStartAddr = QL_FPGA_OFFSET;
10865 erase_delay = 20000000;
10867 bfp = kmem_zalloc(ssize, KM_SLEEP);
10869 /* Save the section of flash we're not updating to buffer */
10870 tmp = bfp;
10871 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10872 /* Allow other system activity. */
10873 if (cnt % 0x1000 == 0) {
10874 ql_delay(ha, 10000);
10876 *tmp++ = (uint8_t)ql_read_flash_byte(ha, cnt);
10880 /* Chip Erase Command Sequence */
10881 ql_write_flash_byte(ha, 0x5555, 0xaa);
10882 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10883 ql_write_flash_byte(ha, 0x5555, 0x80);
10884 ql_write_flash_byte(ha, 0x5555, 0xaa);
10885 ql_write_flash_byte(ha, 0x2aaa, 0x55);
10886 ql_write_flash_byte(ha, 0x5555, 0x10);
10888 ql_delay(ha, erase_delay);
10890 /* Wait for erase to complete. */
10891 rval = ql_poll_flash(ha, 0, 0x80);
10893 if (rval != QL_SUCCESS) {
10894 EL(ha, "failed=%xh\n", rval);
10895 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10896 kmem_free(bfp, ssize);
10898 return (rval);
10901 /* restore the section we saved in the buffer */
10902 if ((CFG_IST(ha, CFG_SBUS_CARD)) && !erase_all) {
10903 /* Restore the section we saved off */
10904 tmp = bfp;
10905 for (cnt = sStartAddr; cnt < ssize+sStartAddr; cnt++) {
10906 /* Allow other system activity. */
10907 if (cnt % 0x1000 == 0) {
10908 ql_delay(ha, 10000);
10910 rval = ql_program_flash_address(ha, cnt, *tmp++);
10911 if (rval != QL_SUCCESS) {
10912 break;
10916 kmem_free(bfp, ssize);
10919 if (rval != QL_SUCCESS) {
10920 EL(ha, "failed=%xh\n", rval);
10921 } else {
10922 /*EMPTY*/
10923 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10925 return (rval);
10929 * ql_poll_flash
10930 * Polls flash for completion.
10932 * Input:
10933 * ha = adapter state pointer.
10934 * addr = flash byte address.
10935 * data = data to be polled.
10937 * Returns:
10938 * ql local function return status code.
10940 * Context:
10941 * Kernel context.
10944 ql_poll_flash(ql_adapter_state_t *ha, uint32_t addr, uint8_t poll_data)
10946 uint8_t flash_data;
10947 uint32_t cnt;
10948 int rval = QL_FUNCTION_FAILED;
10950 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10952 poll_data = (uint8_t)(poll_data & BIT_7);
10954 /* Wait for 30 seconds for command to finish. */
10955 for (cnt = 30000000; cnt; cnt--) {
10956 flash_data = (uint8_t)ql_read_flash_byte(ha, addr);
10958 if ((flash_data & BIT_7) == poll_data) {
10959 rval = QL_SUCCESS;
10960 break;
10962 if (flash_data & BIT_5 && cnt > 2) {
10963 cnt = 2;
10965 drv_usecwait(1);
10968 if (rval != QL_SUCCESS) {
10969 EL(ha, "failed=%xh\n", rval);
10970 } else {
10971 /*EMPTY*/
10972 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
10974 return (rval);
10978 * ql_flash_enable
10979 * Setup flash for reading/writing.
10981 * Input:
10982 * ha = adapter state pointer.
10984 * Context:
10985 * Kernel context.
10987 void
10988 ql_flash_enable(ql_adapter_state_t *ha)
10990 uint16_t data;
10992 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
10994 /* Enable Flash Read/Write. */
10995 if (CFG_IST(ha, CFG_SBUS_CARD)) {
10996 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
10997 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
10998 data = (uint16_t)(data | SBUS_FLASH_WRITE_ENABLE);
10999 ddi_put16(ha->sbus_fpga_dev_handle,
11000 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11001 /* Read reset command sequence */
11002 ql_write_flash_byte(ha, 0xaaa, 0xaa);
11003 ql_write_flash_byte(ha, 0x555, 0x55);
11004 ql_write_flash_byte(ha, 0xaaa, 0x20);
11005 ql_write_flash_byte(ha, 0x555, 0xf0);
11006 } else {
11007 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) |
11008 ISP_FLASH_ENABLE);
11009 WRT16_IO_REG(ha, ctrl_status, data);
11011 /* Read/Reset Command Sequence */
11012 ql_write_flash_byte(ha, 0x5555, 0xaa);
11013 ql_write_flash_byte(ha, 0x2aaa, 0x55);
11014 ql_write_flash_byte(ha, 0x5555, 0xf0);
11016 (void) ql_read_flash_byte(ha, 0);
11018 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11022 * ql_flash_disable
11023 * Disable flash and allow RISC to run.
11025 * Input:
11026 * ha = adapter state pointer.
11028 * Context:
11029 * Kernel context.
11031 void
11032 ql_flash_disable(ql_adapter_state_t *ha)
11034 uint16_t data;
11036 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11038 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11040 * Lock the flash back up.
11042 ql_write_flash_byte(ha, 0x555, 0x90);
11043 ql_write_flash_byte(ha, 0x555, 0x0);
11045 data = (uint16_t)ddi_get16(ha->sbus_fpga_dev_handle,
11046 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF));
11047 data = (uint16_t)(data & ~SBUS_FLASH_WRITE_ENABLE);
11048 ddi_put16(ha->sbus_fpga_dev_handle,
11049 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_CONF), data);
11050 } else {
11051 data = (uint16_t)(RD16_IO_REG(ha, ctrl_status) &
11052 ~ISP_FLASH_ENABLE);
11053 WRT16_IO_REG(ha, ctrl_status, data);
11056 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11060 * ql_write_flash_byte
11061 * Write byte to flash.
11063 * Input:
11064 * ha = adapter state pointer.
11065 * addr = flash byte address.
11066 * data = data to be written.
11068 * Context:
11069 * Kernel context.
11071 void
11072 ql_write_flash_byte(ql_adapter_state_t *ha, uint32_t addr, uint8_t data)
11074 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11075 ddi_put16(ha->sbus_fpga_dev_handle,
11076 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11077 LSW(addr));
11078 ddi_put16(ha->sbus_fpga_dev_handle,
11079 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11080 MSW(addr));
11081 ddi_put16(ha->sbus_fpga_dev_handle,
11082 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA),
11083 (uint16_t)data);
11084 } else {
11085 uint16_t bank_select;
11087 /* Setup bit 16 of flash address. */
11088 bank_select = (uint16_t)RD16_IO_REG(ha, ctrl_status);
11090 if (CFG_IST(ha, CFG_CTRL_6322)) {
11091 bank_select = (uint16_t)(bank_select & ~0xf0);
11092 bank_select = (uint16_t)(bank_select |
11093 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11094 WRT16_IO_REG(ha, ctrl_status, bank_select);
11095 } else {
11096 if (addr & BIT_16 && !(bank_select &
11097 ISP_FLASH_64K_BANK)) {
11098 bank_select = (uint16_t)(bank_select |
11099 ISP_FLASH_64K_BANK);
11100 WRT16_IO_REG(ha, ctrl_status, bank_select);
11101 } else if (!(addr & BIT_16) && bank_select &
11102 ISP_FLASH_64K_BANK) {
11103 bank_select = (uint16_t)(bank_select &
11104 ~ISP_FLASH_64K_BANK);
11105 WRT16_IO_REG(ha, ctrl_status, bank_select);
11109 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11110 WRT16_IO_REG(ha, flash_address, (uint16_t)addr);
11111 WRT16_IO_REG(ha, flash_data, (uint16_t)data);
11112 } else {
11113 WRT16_IOMAP_REG(ha, flash_address, addr);
11114 WRT16_IOMAP_REG(ha, flash_data, data);
11120 * ql_read_flash_byte
11121 * Reads byte from flash, but must read a word from chip.
11123 * Input:
11124 * ha = adapter state pointer.
11125 * addr = flash byte address.
11127 * Returns:
11128 * byte from flash.
11130 * Context:
11131 * Kernel context.
11133 uint8_t
11134 ql_read_flash_byte(ql_adapter_state_t *ha, uint32_t addr)
11136 uint8_t data;
11138 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11139 ddi_put16(ha->sbus_fpga_dev_handle,
11140 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_LOADDR),
11141 LSW(addr));
11142 ddi_put16(ha->sbus_fpga_dev_handle,
11143 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_HIADDR),
11144 MSW(addr));
11145 data = (uint8_t)ddi_get16(ha->sbus_fpga_dev_handle,
11146 (uint16_t *)(ha->sbus_fpga_iobase + FPGA_EEPROM_DATA));
11147 } else {
11148 uint16_t bank_select;
11150 /* Setup bit 16 of flash address. */
11151 bank_select = RD16_IO_REG(ha, ctrl_status);
11152 if (CFG_IST(ha, CFG_CTRL_6322)) {
11153 bank_select = (uint16_t)(bank_select & ~0xf0);
11154 bank_select = (uint16_t)(bank_select |
11155 ((addr >> 12 & 0xf0) | ISP_FLASH_64K_BANK));
11156 WRT16_IO_REG(ha, ctrl_status, bank_select);
11157 } else {
11158 if (addr & BIT_16 &&
11159 !(bank_select & ISP_FLASH_64K_BANK)) {
11160 bank_select = (uint16_t)(bank_select |
11161 ISP_FLASH_64K_BANK);
11162 WRT16_IO_REG(ha, ctrl_status, bank_select);
11163 } else if (!(addr & BIT_16) &&
11164 bank_select & ISP_FLASH_64K_BANK) {
11165 bank_select = (uint16_t)(bank_select &
11166 ~ISP_FLASH_64K_BANK);
11167 WRT16_IO_REG(ha, ctrl_status, bank_select);
11171 if (CFG_IST(ha, CFG_SBUS_CARD)) {
11172 WRT16_IO_REG(ha, flash_address, addr);
11173 data = (uint8_t)RD16_IO_REG(ha, flash_data);
11174 } else {
11175 WRT16_IOMAP_REG(ha, flash_address, addr);
11176 data = (uint8_t)RD16_IOMAP_REG(ha, flash_data);
11180 return (data);
/*
 * ql_24xx_flash_id
 *	Get flash IDs.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_flash_id(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata = 0;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR | 0x3AB, &fdata);

	/*
	 * Retry from the alternate ID location when the first read failed,
	 * returned no ID, or on CFG_CTRL_2581 parts.
	 */
	if (rval != QL_SUCCESS || fdata == 0 || CFG_IST(ha, CFG_CTRL_2581)) {
		fdata = 0;
		rval = ql_24xx_read_flash(ha, FLASH_CONF_ADDR |
		    (CFG_IST(ha, CFG_CTRL_2422) ? 0x39F : 0x49F), &fdata);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "24xx read_flash failed=%xh\n", rval);
	} else if (fdata != 0) {
		/* Unpack manufacturer, device id and length code bytes. */
		xp->fdesc.flash_manuf = LSB(LSW(fdata));
		xp->fdesc.flash_id = MSB(LSW(fdata));
		xp->fdesc.flash_len = LSB(MSW(fdata));
	} else {
		/* No ID word returned; assume the default Atmel part. */
		xp->fdesc.flash_manuf = ATMEL_FLASH;
		xp->fdesc.flash_id = ATMEL_FLASHID_1024K;
		xp->fdesc.flash_len = 0;
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (rval);
}
11232 * ql_24xx_load_flash
11233 * Loads flash.
11235 * Input:
11236 * ha = adapter state pointer.
11237 * dp = data pointer.
11238 * size = data length in bytes.
11239 * faddr = 32bit word flash byte address.
11241 * Returns:
11242 * ql local function return status code.
11244 * Context:
11245 * Kernel context.
11248 ql_24xx_load_flash(ql_adapter_state_t *vha, uint8_t *dp, uint32_t size,
11249 uint32_t faddr)
11251 int rval;
11252 uint32_t cnt, rest_addr, fdata, wc;
11253 dma_mem_t dmabuf = {0};
11254 ql_adapter_state_t *ha = vha->pha;
11255 ql_xioctl_t *xp = ha->xioctl;
11257 QL_PRINT_3(CE_CONT, "(%d): started, faddr=%xh, size=%xh\n",
11258 ha->instance, faddr, size);
11260 /* start address must be 32 bit word aligned */
11261 if ((faddr & 0x3) != 0) {
11262 EL(ha, "incorrect buffer size alignment\n");
11263 return (QL_FUNCTION_PARAMETER_ERROR);
11266 /* Allocate DMA buffer */
11267 if (CFG_IST(ha, CFG_CTRL_2581)) {
11268 if ((rval = ql_get_dma_mem(ha, &dmabuf, 0xffff,
11269 LITTLE_ENDIAN_DMA, QL_DMA_DATA_ALIGN)) !=
11270 QL_SUCCESS) {
11271 EL(ha, "dma alloc failed, rval=%xh\n", rval);
11272 return (rval);
11276 GLOBAL_HW_LOCK();
11278 /* Enable flash write */
11279 if ((rval = ql_24xx_unprotect_flash(ha)) != QL_SUCCESS) {
11280 GLOBAL_HW_UNLOCK();
11281 EL(ha, "unprotect_flash failed, rval=%xh\n", rval);
11282 ql_free_phys(ha, &dmabuf);
11283 return (rval);
11286 /* setup mask of address range within a sector */
11287 rest_addr = (xp->fdesc.block_size - 1) >> 2;
11289 faddr = faddr >> 2; /* flash gets 32 bit words */
11292 * Write data to flash.
11294 cnt = 0;
11295 size = (size + 3) >> 2; /* Round up & convert to dwords */
11297 while (cnt < size) {
11298 /* Beginning of a sector? */
11299 if ((faddr & rest_addr) == 0) {
11300 if (CFG_IST(ha, CFG_CTRL_8021)) {
11301 fdata = ha->flash_data_addr | faddr;
11302 rval = ql_8021_rom_erase(ha, fdata);
11303 if (rval != QL_SUCCESS) {
11304 EL(ha, "8021 erase sector status="
11305 "%xh, start=%xh, end=%xh"
11306 "\n", rval, fdata,
11307 fdata + rest_addr);
11308 break;
11310 } else if (CFG_IST(ha, CFG_CTRL_81XX)) {
11311 fdata = ha->flash_data_addr | faddr;
11312 rval = ql_flash_access(ha,
11313 FAC_ERASE_SECTOR, fdata, fdata +
11314 rest_addr, 0);
11315 if (rval != QL_SUCCESS) {
11316 EL(ha, "erase sector status="
11317 "%xh, start=%xh, end=%xh"
11318 "\n", rval, fdata,
11319 fdata + rest_addr);
11320 break;
11322 } else {
11323 fdata = (faddr & ~rest_addr) << 2;
11324 fdata = (fdata & 0xff00) |
11325 (fdata << 16 & 0xff0000) |
11326 (fdata >> 16 & 0xff);
11328 if (rest_addr == 0x1fff) {
11329 /* 32kb sector block erase */
11330 rval = ql_24xx_write_flash(ha,
11331 FLASH_CONF_ADDR | 0x0352,
11332 fdata);
11333 } else {
11334 /* 64kb sector block erase */
11335 rval = ql_24xx_write_flash(ha,
11336 FLASH_CONF_ADDR | 0x03d8,
11337 fdata);
11339 if (rval != QL_SUCCESS) {
11340 EL(ha, "Unable to flash sector"
11341 ": address=%xh\n", faddr);
11342 break;
11347 /* Write data */
11348 if (CFG_IST(ha, CFG_CTRL_2581) &&
11349 ((faddr & 0x3f) == 0)) {
11351 * Limit write up to sector boundary.
11353 wc = ((~faddr & (rest_addr>>1)) + 1);
11355 if (size - cnt < wc) {
11356 wc = size - cnt;
11359 ddi_rep_put8(dmabuf.acc_handle, (uint8_t *)dp,
11360 (uint8_t *)dmabuf.bp, wc<<2,
11361 DDI_DEV_AUTOINCR);
11363 rval = ql_wrt_risc_ram(ha, ha->flash_data_addr |
11364 faddr, dmabuf.cookie.dmac_laddress, wc);
11365 if (rval != QL_SUCCESS) {
11366 EL(ha, "unable to dma to flash "
11367 "address=%xh\n", faddr << 2);
11368 break;
11371 cnt += wc;
11372 faddr += wc;
11373 dp += wc << 2;
11374 } else {
11375 fdata = *dp++;
11376 fdata |= *dp++ << 8;
11377 fdata |= *dp++ << 16;
11378 fdata |= *dp++ << 24;
11379 rval = ql_24xx_write_flash(ha,
11380 ha->flash_data_addr | faddr, fdata);
11381 if (rval != QL_SUCCESS) {
11382 EL(ha, "Unable to program flash "
11383 "address=%xh data=%xh\n", faddr,
11384 *dp);
11385 break;
11387 cnt++;
11388 faddr++;
11390 /* Allow other system activity. */
11391 if (cnt % 0x1000 == 0) {
11392 ql_delay(ha, 10000);
11397 ql_24xx_protect_flash(ha);
11399 ql_free_phys(ha, &dmabuf);
11401 GLOBAL_HW_UNLOCK();
11403 if (rval != QL_SUCCESS) {
11404 EL(ha, "failed=%xh\n", rval);
11405 } else {
11406 /*EMPTY*/
11407 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
11409 return (rval);
/*
 * ql_24xx_read_flash
 *	Reads a 32bit word from ISP24xx NVRAM/FLASH.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	faddr:	NVRAM/FLASH address.
 *	bp:	data pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_read_flash(ql_adapter_state_t *vha, uint32_t faddr, uint32_t *bp)
{
	uint32_t		timer;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */

	/* 8021 parts use their own rom access routine. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_read(ha, faddr, bp)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Start the read cycle (FLASH_DATA_FLAG clear = read). */
	WRT32_IO_REG(ha, flash_address, faddr & ~FLASH_DATA_FLAG);

	/* Wait for READ cycle to complete. */
	for (timer = 300000; timer; timer--) {
		if (RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) {
			break;
		}
		drv_usecwait(10);
	}

	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "failed, access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	/* Data register contents are stored even on error. */
	*bp = RD32_IO_REG(ha, flash_data);

	return (rval);
}
/*
 * ql_24xx_write_flash
 *	Writes a 32bit word to ISP24xx NVRAM/FLASH.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	addr:	NVRAM/FLASH address.
 *	value:	data.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_write_flash(ql_adapter_state_t *vha, uint32_t addr, uint32_t data)
{
	uint32_t		timer, fdata;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */

	/* 8021 parts use their own rom access routine. */
	if (CFG_IST(ha, CFG_CTRL_8021)) {
		if ((rval = ql_8021_rom_write(ha, addr, data)) != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}

	/* Clear access error flag */
	WRT32_IO_REG(ha, ctrl_status,
	    RD32_IO_REG(ha, ctrl_status) | FLASH_NVRAM_ACCESS_ERROR);

	/* Data first, then address with FLASH_DATA_FLAG to start writing. */
	WRT32_IO_REG(ha, flash_data, data);
	RD32_IO_REG(ha, flash_data);		/* PCI Posting. */
	WRT32_IO_REG(ha, flash_address, addr | FLASH_DATA_FLAG);

	/* Wait for Write cycle to complete. */
	for (timer = 3000000; timer; timer--) {
		if ((RD32_IO_REG(ha, flash_address) & FLASH_DATA_FLAG) == 0) {
			/* Check flash write in progress. */
			if ((addr & FLASH_ADDR_MASK) == FLASH_CONF_ADDR) {
				/* Poll status register BIT_0 (busy). */
				(void) ql_24xx_read_flash(ha,
				    FLASH_CONF_ADDR | 0x005, &fdata);
				if (!(fdata & BIT_0)) {
					break;
				}
			} else {
				break;
			}
		}
		drv_usecwait(10);
	}
	if (timer == 0) {
		EL(ha, "failed, timeout\n");
		rval = QL_FUNCTION_TIMEOUT;
	} else if (RD32_IO_REG(ha, ctrl_status) & FLASH_NVRAM_ACCESS_ERROR) {
		EL(ha, "access error\n");
		rval = QL_FUNCTION_FAILED;
	}

	return (rval);
}
/*
 * ql_24xx_unprotect_flash
 *	Enable writes
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_24xx_unprotect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/*
		 * NOTE(review): wrsr is issued twice with the same enable
		 * bits and the first call's status is discarded — confirm
		 * intent before changing.
		 */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return (rval);
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware up, use the FAC mailbox interface. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_ENABLE, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return (rval);
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Remove block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 * Unprotect sectors.
	 */
	(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x100 |
	    xp->fdesc.write_statusreg_cmd, xp->fdesc.write_enable_bits);

	if (xp->fdesc.unprotect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x300 | xp->fdesc.unprotect_sector_cmd, fdata);
		}

		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x300 |
		    xp->fdesc.unprotect_sector_cmd, 0x00800f);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
/*
 * ql_24xx_protect_flash
 *	Disable writes
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Context:
 *	Kernel context.
 */
void
ql_24xx_protect_flash(ql_adapter_state_t *vha)
{
	int			rval;
	uint32_t		fdata;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */
	ql_xioctl_t		*xp = ha->xioctl;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		/* Re-enable then write the disable bits to lock the part. */
		(void) ql_8021_rom_wrsr(ha, xp->fdesc.write_enable_bits);
		rval = ql_8021_rom_wrsr(ha, xp->fdesc.write_disable_bits);
		if (rval != QL_SUCCESS) {
			EL(ha, "8021 access error\n");
		}
		return;
	}
	if (CFG_IST(ha, CFG_CTRL_81XX)) {
		/* With firmware up, use the FAC mailbox interface. */
		if (ha->task_daemon_flags & FIRMWARE_UP) {
			if ((rval = ql_flash_access(ha, FAC_WRT_PROTECT, 0, 0,
			    0)) != QL_SUCCESS) {
				EL(ha, "status=%xh\n", rval);
			}
			QL_PRINT_3(CE_CONT, "(%d): 8100 done\n",
			    ha->instance);
			return;
		}
	} else {
		/* Enable flash write. */
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) | ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	/*
	 * Protect sectors.
	 * Set block write protection (SST and ST) and
	 * Sector/Block Protection Register Lock (SST, ST, ATMEL).
	 */
	if (xp->fdesc.protect_sector_cmd != 0) {
		for (fdata = 0; fdata < 0x10; fdata++) {
			(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR |
			    0x330 | xp->fdesc.protect_sector_cmd, fdata);
		}

		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00400f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00600f);
		(void) ql_24xx_write_flash(ha, FLASH_CONF_ADDR | 0x330 |
		    xp->fdesc.protect_sector_cmd, 0x00800f);

		/* TODO: ??? */
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x80);
	} else {
		(void) ql_24xx_write_flash(ha,
		    FLASH_CONF_ADDR | 0x101, 0x9c);
	}

	/* Disable flash write. */
	if (!(CFG_IST(ha, CFG_CTRL_81XX))) {
		WRT32_IO_REG(ha, ctrl_status,
		    RD32_IO_REG(ha, ctrl_status) & ~ISP_FLASH_ENABLE);
		RD32_IO_REG(ha, ctrl_status);	/* PCI Posting. */
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
/*
 * ql_dump_firmware
 *	Save RISC code state information.
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	QL local function return status code.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_dump_firmware(ql_adapter_state_t *vha)
{
	int			rval;
	clock_t			timer = drv_usectohz(30000000);
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	QL_DUMP_LOCK(ha);

	/* Nothing to do when a dump is running or awaiting upload. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		QL_DUMP_UNLOCK(ha);
		return (QL_SUCCESS);
	}

	QL_DUMP_UNLOCK(ha);

	/* Stall the driver before dumping. */
	ql_awaken_task_daemon(ha, NULL, DRIVER_STALL, 0);

	/*
	 * Wait for all outstanding commands to complete
	 */
	(void) ql_wait_outstanding(ha);

	/* Dump firmware. */
	rval = ql_binary_fw_dump(ha, TRUE);

	/* Do abort to force restart. */
	ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, DRIVER_STALL);
	EL(ha, "restarting, isp_abort_needed\n");

	/* Acquire task daemon lock. */
	TASK_DAEMON_LOCK(ha);

	/* Wait for suspension to end. */
	while (ha->task_daemon_flags & QL_SUSPENDED) {
		ha->task_daemon_flags |= SUSPENDED_WAKEUP_FLG;

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_dr_suspended,
		    &ha->task_daemon_mutex, timer, TR_CLOCK_TICK) == -1) {
			/*
			 * The timeout time 'timer' was
			 * reached without the condition
			 * being signaled.
			 */
			break;
		}
	}

	/* Release task daemon lock. */
	TASK_DAEMON_UNLOCK(ha);

	/* QL_DATA_EXISTS means a dump was already captured — not an error. */
	if (rval == QL_SUCCESS || rval == QL_DATA_EXISTS) {
		/*EMPTY*/
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
	} else {
		EL(ha, "failed, rval = %xh\n", rval);
	}
	return (rval);
}
/*
 * ql_binary_fw_dump
 *	Dumps binary data from firmware.
 *
 * Input:
 *	ha = adapter state pointer.
 *	lock_needed = mailbox lock needed.
 *
 * Returns:
 *	ql local function return status code.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
int
ql_binary_fw_dump(ql_adapter_state_t *vha, int lock_needed)
{
	clock_t			timer;
	mbx_cmd_t		mc;
	mbx_cmd_t		*mcp = &mc;
	int			rval = QL_SUCCESS;
	ql_adapter_state_t	*ha = vha->pha;	/* operate on physical HBA */

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (CFG_IST(ha, CFG_CTRL_8021)) {
		EL(ha, "8021 not supported\n");
		return (QL_NOT_SUPPORTED);
	}

	QL_DUMP_LOCK(ha);

	/* Bail when a dump is running or a valid one awaits upload. */
	if (ha->ql_dump_state & QL_DUMPING ||
	    (ha->ql_dump_state & QL_DUMP_VALID &&
	    !(ha->ql_dump_state & QL_DUMP_UPLOADED))) {
		EL(ha, "dump already done, qds=%x\n", ha->ql_dump_state);
		QL_DUMP_UNLOCK(ha);
		return (QL_DATA_EXISTS);
	}

	/* Mark dump in progress. */
	ha->ql_dump_state &= ~(QL_DUMP_VALID | QL_DUMP_UPLOADED);
	ha->ql_dump_state |= QL_DUMPING;

	QL_DUMP_UNLOCK(ha);

	if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE)) {

		/* Insert Time Stamp */
		rval = ql_fw_etrace(ha, &ha->fwexttracebuf,
		    FTO_INSERT_TIME_STAMP);
		if (rval != QL_SUCCESS) {
			EL(ha, "f/w extended trace insert"
			    "time stamp failed: %xh\n", rval);
		}
	}

	if (lock_needed == TRUE) {
		/* Acquire mailbox register lock. */
		MBX_REGISTER_LOCK(ha);
		timer = (ha->mcp->timeout + 2) * drv_usectohz(1000000);

		/* Check for mailbox available, if not wait for signal. */
		while (ha->mailbox_flags & MBX_BUSY_FLG) {
			ha->mailbox_flags = (uint8_t)
			    (ha->mailbox_flags | MBX_WANT_FLG);

			/* 30 seconds from now */
			if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
			    timer, TR_CLOCK_TICK) == -1) {
				/*
				 * The timeout time 'timer' was
				 * reached without the condition
				 * being signaled.
				 */

				/* Release mailbox register lock. */
				MBX_REGISTER_UNLOCK(ha);

				EL(ha, "failed, rval = %xh\n",
				    QL_FUNCTION_TIMEOUT);
				return (QL_FUNCTION_TIMEOUT);
			}
		}

		/* Set busy flag. */
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_BUSY_FLG);
		mcp->timeout = 120;
		ha->mcp = mcp;

		/* Release mailbox register lock. */
		MBX_REGISTER_UNLOCK(ha);
	}

	/* Free previous dump buffer. */
	if (ha->ql_dump_ptr != NULL) {
		kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
		ha->ql_dump_ptr = NULL;
	}

	/* Size the dump buffer for the chip family. */
	if (CFG_IST(ha, CFG_CTRL_2422)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_24xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_25xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
		ha->ql_dump_size = (uint32_t)(sizeof (ql_81xx_fw_dump_t) +
		    ha->fw_ext_memory_size);
	} else {
		ha->ql_dump_size = sizeof (ql_fw_dump_t);
	}

	if ((ha->ql_dump_ptr = kmem_zalloc(ha->ql_dump_size, KM_NOSLEEP)) ==
	    NULL) {
		rval = QL_MEMORY_ALLOC_FAILED;
	} else {
		/* Dispatch to the chip-specific binary dump routine. */
		if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
			rval = ql_2300_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_81XX)) {
			rval = ql_81xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_25XX)) {
			rval = ql_25xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else if (CFG_IST(ha, CFG_CTRL_2422)) {
			rval = ql_24xx_binary_fw_dump(ha, ha->ql_dump_ptr);
		} else {
			rval = ql_2200_binary_fw_dump(ha, ha->ql_dump_ptr);
		}
	}

	/* Reset ISP chip. */
	ql_reset_chip(ha);

	QL_DUMP_LOCK(ha);

	if (rval != QL_SUCCESS) {
		/* Release the buffer and clear all dump state on failure. */
		if (ha->ql_dump_ptr != NULL) {
			kmem_free(ha->ql_dump_ptr, ha->ql_dump_size);
			ha->ql_dump_ptr = NULL;
		}
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_VALID |
		    QL_DUMP_UPLOADED);
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		ha->ql_dump_state &= ~(QL_DUMPING | QL_DUMP_UPLOADED);
		ha->ql_dump_state |= QL_DUMP_VALID;
		EL(ha, "done\n");
	}

	QL_DUMP_UNLOCK(ha);

	return (rval);
}
11919 * ql_ascii_fw_dump
11920 * Converts firmware binary dump to ascii.
11922 * Input:
11923 * ha = adapter state pointer.
11924 * bptr = buffer pointer.
11926 * Returns:
11927 * Amount of data buffer used.
11929 * Context:
11930 * Kernel context.
11932 size_t
11933 ql_ascii_fw_dump(ql_adapter_state_t *vha, caddr_t bufp)
11935 uint32_t cnt;
11936 caddr_t bp;
11937 int mbox_cnt;
11938 ql_adapter_state_t *ha = vha->pha;
11939 ql_fw_dump_t *fw = ha->ql_dump_ptr;
11941 if (CFG_IST(ha, CFG_CTRL_2422)) {
11942 return (ql_24xx_ascii_fw_dump(ha, bufp));
11943 } else if (CFG_IST(ha, CFG_CTRL_2581)) {
11944 return (ql_2581_ascii_fw_dump(ha, bufp));
11947 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
11949 if (CFG_IST(ha, CFG_CTRL_2300)) {
11950 (void) sprintf(bufp, "\nISP 2300IP ");
11951 } else if (CFG_IST(ha, CFG_CTRL_6322)) {
11952 (void) sprintf(bufp, "\nISP 6322FLX ");
11953 } else {
11954 (void) sprintf(bufp, "\nISP 2200IP ");
11957 bp = bufp + strlen(bufp);
11958 (void) sprintf(bp, "Firmware Version %d.%d.%d\n",
11959 ha->fw_major_version, ha->fw_minor_version,
11960 ha->fw_subminor_version);
11962 (void) strcat(bufp, "\nPBIU Registers:");
11963 bp = bufp + strlen(bufp);
11964 for (cnt = 0; cnt < sizeof (fw->pbiu_reg) / 2; cnt++) {
11965 if (cnt % 8 == 0) {
11966 *bp++ = '\n';
11968 (void) sprintf(bp, "%04x ", fw->pbiu_reg[cnt]);
11969 bp = bp + 6;
11972 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11973 (void) strcat(bufp, "\n\nReqQ-RspQ-Risc2Host Status "
11974 "registers:");
11975 bp = bufp + strlen(bufp);
11976 for (cnt = 0; cnt < sizeof (fw->risc_host_reg) / 2; cnt++) {
11977 if (cnt % 8 == 0) {
11978 *bp++ = '\n';
11980 (void) sprintf(bp, "%04x ", fw->risc_host_reg[cnt]);
11981 bp = bp + 6;
11985 (void) strcat(bp, "\n\nMailbox Registers:");
11986 bp = bufp + strlen(bufp);
11987 mbox_cnt = (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) ? 16 : 8;
11988 for (cnt = 0; cnt < mbox_cnt; cnt++) {
11989 if (cnt % 8 == 0) {
11990 *bp++ = '\n';
11992 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
11993 bp = bp + 6;
11996 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
11997 (void) strcat(bp, "\n\nAuto Request Response DMA Registers:");
11998 bp = bufp + strlen(bufp);
11999 for (cnt = 0; cnt < sizeof (fw->resp_dma_reg) / 2; cnt++) {
12000 if (cnt % 8 == 0) {
12001 *bp++ = '\n';
12003 (void) sprintf(bp, "%04x ", fw->resp_dma_reg[cnt]);
12004 bp = bp + 6;
12008 (void) strcat(bp, "\n\nDMA Registers:");
12009 bp = bufp + strlen(bufp);
12010 for (cnt = 0; cnt < sizeof (fw->dma_reg) / 2; cnt++) {
12011 if (cnt % 8 == 0) {
12012 *bp++ = '\n';
12014 (void) sprintf(bp, "%04x ", fw->dma_reg[cnt]);
12015 bp = bp + 6;
12018 (void) strcat(bp, "\n\nRISC Hardware Registers:");
12019 bp = bufp + strlen(bufp);
12020 for (cnt = 0; cnt < sizeof (fw->risc_hdw_reg) / 2; cnt++) {
12021 if (cnt % 8 == 0) {
12022 *bp++ = '\n';
12024 (void) sprintf(bp, "%04x ", fw->risc_hdw_reg[cnt]);
12025 bp = bp + 6;
12028 (void) strcat(bp, "\n\nRISC GP0 Registers:");
12029 bp = bufp + strlen(bufp);
12030 for (cnt = 0; cnt < sizeof (fw->risc_gp0_reg) / 2; cnt++) {
12031 if (cnt % 8 == 0) {
12032 *bp++ = '\n';
12034 (void) sprintf(bp, "%04x ", fw->risc_gp0_reg[cnt]);
12035 bp = bp + 6;
12038 (void) strcat(bp, "\n\nRISC GP1 Registers:");
12039 bp = bufp + strlen(bufp);
12040 for (cnt = 0; cnt < sizeof (fw->risc_gp1_reg) / 2; cnt++) {
12041 if (cnt % 8 == 0) {
12042 *bp++ = '\n';
12044 (void) sprintf(bp, "%04x ", fw->risc_gp1_reg[cnt]);
12045 bp = bp + 6;
12048 (void) strcat(bp, "\n\nRISC GP2 Registers:");
12049 bp = bufp + strlen(bufp);
12050 for (cnt = 0; cnt < sizeof (fw->risc_gp2_reg) / 2; cnt++) {
12051 if (cnt % 8 == 0) {
12052 *bp++ = '\n';
12054 (void) sprintf(bp, "%04x ", fw->risc_gp2_reg[cnt]);
12055 bp = bp + 6;
12058 (void) strcat(bp, "\n\nRISC GP3 Registers:");
12059 bp = bufp + strlen(bufp);
12060 for (cnt = 0; cnt < sizeof (fw->risc_gp3_reg) / 2; cnt++) {
12061 if (cnt % 8 == 0) {
12062 *bp++ = '\n';
12064 (void) sprintf(bp, "%04x ", fw->risc_gp3_reg[cnt]);
12065 bp = bp + 6;
12068 (void) strcat(bp, "\n\nRISC GP4 Registers:");
12069 bp = bufp + strlen(bufp);
12070 for (cnt = 0; cnt < sizeof (fw->risc_gp4_reg) / 2; cnt++) {
12071 if (cnt % 8 == 0) {
12072 *bp++ = '\n';
12074 (void) sprintf(bp, "%04x ", fw->risc_gp4_reg[cnt]);
12075 bp = bp + 6;
12078 (void) strcat(bp, "\n\nRISC GP5 Registers:");
12079 bp = bufp + strlen(bufp);
12080 for (cnt = 0; cnt < sizeof (fw->risc_gp5_reg) / 2; cnt++) {
12081 if (cnt % 8 == 0) {
12082 *bp++ = '\n';
12084 (void) sprintf(bp, "%04x ", fw->risc_gp5_reg[cnt]);
12085 bp = bp + 6;
12088 (void) strcat(bp, "\n\nRISC GP6 Registers:");
12089 bp = bufp + strlen(bufp);
12090 for (cnt = 0; cnt < sizeof (fw->risc_gp6_reg) / 2; cnt++) {
12091 if (cnt % 8 == 0) {
12092 *bp++ = '\n';
12094 (void) sprintf(bp, "%04x ", fw->risc_gp6_reg[cnt]);
12095 bp = bp + 6;
12098 (void) strcat(bp, "\n\nRISC GP7 Registers:");
12099 bp = bufp + strlen(bufp);
12100 for (cnt = 0; cnt < sizeof (fw->risc_gp7_reg) / 2; cnt++) {
12101 if (cnt % 8 == 0) {
12102 *bp++ = '\n';
12104 (void) sprintf(bp, "%04x ", fw->risc_gp7_reg[cnt]);
12105 bp = bp + 6;
12108 (void) strcat(bp, "\n\nFrame Buffer Hardware Registers:");
12109 bp = bufp + strlen(bufp);
12110 for (cnt = 0; cnt < sizeof (fw->frame_buf_hdw_reg) / 2; cnt++) {
12111 if ((cnt == 16) && ((CFG_IST(ha, (CFG_CTRL_2300 |
12112 CFG_CTRL_6322)) == 0))) {
12113 break;
12115 if (cnt % 8 == 0) {
12116 *bp++ = '\n';
12118 (void) sprintf(bp, "%04x ", fw->frame_buf_hdw_reg[cnt]);
12119 bp = bp + 6;
12122 (void) strcat(bp, "\n\nFPM B0 Registers:");
12123 bp = bufp + strlen(bufp);
12124 for (cnt = 0; cnt < sizeof (fw->fpm_b0_reg) / 2; cnt++) {
12125 if (cnt % 8 == 0) {
12126 *bp++ = '\n';
12128 (void) sprintf(bp, "%04x ", fw->fpm_b0_reg[cnt]);
12129 bp = bp + 6;
12132 (void) strcat(bp, "\n\nFPM B1 Registers:");
12133 bp = bufp + strlen(bufp);
12134 for (cnt = 0; cnt < sizeof (fw->fpm_b1_reg) / 2; cnt++) {
12135 if (cnt % 8 == 0) {
12136 *bp++ = '\n';
12138 (void) sprintf(bp, "%04x ", fw->fpm_b1_reg[cnt]);
12139 bp = bp + 6;
12142 if (CFG_IST(ha, (CFG_CTRL_2300 | CFG_CTRL_6322))) {
12143 (void) strcat(bp, "\n\nCode RAM Dump:");
12144 bp = bufp + strlen(bufp);
12145 for (cnt = 0; cnt < sizeof (fw->risc_ram) / 2; cnt++) {
12146 if (cnt % 8 == 0) {
12147 (void) sprintf(bp, "\n%05x: ", cnt + 0x0800);
12148 bp = bp + 8;
12150 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12151 bp = bp + 6;
12154 (void) strcat(bp, "\n\nStack RAM Dump:");
12155 bp = bufp + strlen(bufp);
12156 for (cnt = 0; cnt < sizeof (fw->stack_ram) / 2; cnt++) {
12157 if (cnt % 8 == 0) {
12158 (void) sprintf(bp, "\n%05x: ", cnt + 0x010000);
12159 bp = bp + 8;
12161 (void) sprintf(bp, "%04x ", fw->stack_ram[cnt]);
12162 bp = bp + 6;
12165 (void) strcat(bp, "\n\nData RAM Dump:");
12166 bp = bufp + strlen(bufp);
12167 for (cnt = 0; cnt < sizeof (fw->data_ram) / 2; cnt++) {
12168 if (cnt % 8 == 0) {
12169 (void) sprintf(bp, "\n%05x: ", cnt + 0x010800);
12170 bp = bp + 8;
12172 (void) sprintf(bp, "%04x ", fw->data_ram[cnt]);
12173 bp = bp + 6;
12175 } else {
12176 (void) strcat(bp, "\n\nRISC SRAM:");
12177 bp = bufp + strlen(bufp);
12178 for (cnt = 0; cnt < 0xf000; cnt++) {
12179 if (cnt % 8 == 0) {
12180 (void) sprintf(bp, "\n%04x: ", cnt + 0x1000);
12181 bp = bp + 7;
12183 (void) sprintf(bp, "%04x ", fw->risc_ram[cnt]);
12184 bp = bp + 6;
12188 (void) strcat(bp, "\n\n[<==END] ISP Debug Dump.");
12189 bp += strlen(bp);
12191 (void) sprintf(bp, "\n\nRequest Queue");
12192 bp += strlen(bp);
12193 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12194 if (cnt % 8 == 0) {
12195 (void) sprintf(bp, "\n%08x: ", cnt);
12196 bp += strlen(bp);
12198 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12199 bp += strlen(bp);
12202 (void) sprintf(bp, "\n\nResponse Queue");
12203 bp += strlen(bp);
12204 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12205 if (cnt % 8 == 0) {
12206 (void) sprintf(bp, "\n%08x: ", cnt);
12207 bp += strlen(bp);
12209 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12210 bp += strlen(bp);
12213 (void) sprintf(bp, "\n");
12215 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
12217 return (strlen(bufp));
12221 * ql_24xx_ascii_fw_dump
12222 * Converts ISP24xx firmware binary dump to ascii.
12224 * Input:
12225 * ha = adapter state pointer.
12226 * bptr = buffer pointer.
12228 * Returns:
12229 * Amount of data buffer used.
12231 * Context:
12232 * Kernel context.
12234 static size_t
12235 ql_24xx_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12237 uint32_t cnt;
12238 caddr_t bp = bufp;
12239 ql_24xx_fw_dump_t *fw = ha->ql_dump_ptr;
12241 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12243 (void) sprintf(bp, "ISP FW Version %d.%02d.%02d Attributes %X\n",
12244 ha->fw_major_version, ha->fw_minor_version,
12245 ha->fw_subminor_version, ha->fw_attributes);
12246 bp += strlen(bp);
12248 (void) sprintf(bp, "\nHCCR Register\n%08x\n", fw->hccr);
12250 (void) strcat(bp, "\nHost Interface Registers");
12251 bp += strlen(bp);
12252 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12253 if (cnt % 8 == 0) {
12254 (void) sprintf(bp++, "\n");
12257 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12258 bp += 9;
12261 (void) sprintf(bp, "\n\nMailbox Registers");
12262 bp += strlen(bp);
12263 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12264 if (cnt % 16 == 0) {
12265 (void) sprintf(bp++, "\n");
12268 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12269 bp += 5;
12272 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12273 bp += strlen(bp);
12274 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12275 if (cnt % 8 == 0) {
12276 (void) sprintf(bp++, "\n");
12279 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12280 bp += 9;
12283 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12284 bp += strlen(bp);
12285 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12286 if (cnt % 8 == 0) {
12287 (void) sprintf(bp++, "\n");
12290 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12291 bp += 9;
12294 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12295 bp += strlen(bp);
12296 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12297 if (cnt % 8 == 0) {
12298 (void) sprintf(bp++, "\n");
12301 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12302 bp += 9;
12305 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12306 bp += strlen(bp);
12307 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12308 if (cnt % 8 == 0) {
12309 (void) sprintf(bp++, "\n");
12312 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12313 bp += 9;
12316 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12317 bp += strlen(bp);
12318 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12319 if (cnt % 8 == 0) {
12320 (void) sprintf(bp++, "\n");
12323 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12324 bp += 9;
12327 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12328 bp += strlen(bp);
12329 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12330 if (cnt % 8 == 0) {
12331 (void) sprintf(bp++, "\n");
12334 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12335 bp += 9;
12338 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12339 bp += strlen(bp);
12340 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12341 if (cnt % 8 == 0) {
12342 (void) sprintf(bp++, "\n");
12345 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12346 bp += 9;
12349 (void) sprintf(bp, "\n\nCommand DMA Registers");
12350 bp += strlen(bp);
12351 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12352 if (cnt % 8 == 0) {
12353 (void) sprintf(bp++, "\n");
12356 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12357 bp += 9;
12360 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12361 bp += strlen(bp);
12362 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12363 if (cnt % 8 == 0) {
12364 (void) sprintf(bp++, "\n");
12367 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12368 bp += 9;
12371 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12372 bp += strlen(bp);
12373 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12374 if (cnt % 8 == 0) {
12375 (void) sprintf(bp++, "\n");
12378 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12379 bp += 9;
12382 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12383 bp += strlen(bp);
12384 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12385 if (cnt % 8 == 0) {
12386 (void) sprintf(bp++, "\n");
12389 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12390 bp += 9;
12393 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12394 bp += strlen(bp);
12395 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12396 if (cnt % 8 == 0) {
12397 (void) sprintf(bp++, "\n");
12400 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12401 bp += 9;
12404 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12405 bp += strlen(bp);
12406 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12407 if (cnt % 8 == 0) {
12408 (void) sprintf(bp++, "\n");
12411 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12412 bp += 9;
12415 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12416 bp += strlen(bp);
12417 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12418 if (cnt % 8 == 0) {
12419 (void) sprintf(bp++, "\n");
12422 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12423 bp += 9;
12426 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12427 bp += strlen(bp);
12428 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12429 if (cnt % 8 == 0) {
12430 (void) sprintf(bp++, "\n");
12433 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12434 bp += 9;
12437 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12438 bp += strlen(bp);
12439 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12440 if (cnt % 8 == 0) {
12441 (void) sprintf(bp++, "\n");
12444 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12445 bp += 9;
12448 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12449 bp += strlen(bp);
12450 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12451 if (cnt % 8 == 0) {
12452 (void) sprintf(bp++, "\n");
12455 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12456 bp += 9;
12459 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12460 bp += strlen(bp);
12461 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12462 if (cnt % 8 == 0) {
12463 (void) sprintf(bp++, "\n");
12466 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12467 bp += 9;
12470 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12471 bp += strlen(bp);
12472 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12473 if (cnt % 8 == 0) {
12474 (void) sprintf(bp++, "\n");
12477 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12478 bp += 9;
12481 (void) sprintf(bp, "\n\nRISC GP Registers");
12482 bp += strlen(bp);
12483 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12484 if (cnt % 8 == 0) {
12485 (void) sprintf(bp++, "\n");
12488 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12489 bp += 9;
12492 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12493 bp += strlen(bp);
12494 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12495 if (cnt % 8 == 0) {
12496 (void) sprintf(bp++, "\n");
12499 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12500 bp += 9;
12503 (void) sprintf(bp, "\n\nLMC Registers");
12504 bp += strlen(bp);
12505 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12506 if (cnt % 8 == 0) {
12507 (void) sprintf(bp++, "\n");
12510 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12511 bp += 9;
12514 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12515 bp += strlen(bp);
12516 for (cnt = 0; cnt < sizeof (fw->fpm_hdw_reg) / 4; cnt++) {
12517 if (cnt % 8 == 0) {
12518 (void) sprintf(bp++, "\n");
12521 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12522 bp += 9;
12525 (void) sprintf(bp, "\n\nFB Hardware Registers");
12526 bp += strlen(bp);
12527 for (cnt = 0; cnt < sizeof (fw->fb_hdw_reg) / 4; cnt++) {
12528 if (cnt % 8 == 0) {
12529 (void) sprintf(bp++, "\n");
12532 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12533 bp += 9;
12536 (void) sprintf(bp, "\n\nCode RAM");
12537 bp += strlen(bp);
12538 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12539 if (cnt % 8 == 0) {
12540 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
12541 bp += 11;
12544 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
12545 bp += 9;
12548 (void) sprintf(bp, "\n\nExternal Memory");
12549 bp += strlen(bp);
12550 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
12551 if (cnt % 8 == 0) {
12552 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
12553 bp += 11;
12555 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
12556 bp += 9;
12559 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
12560 bp += strlen(bp);
12562 (void) sprintf(bp, "\n\nRequest Queue");
12563 bp += strlen(bp);
12564 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
12565 if (cnt % 8 == 0) {
12566 (void) sprintf(bp, "\n%08x: ", cnt);
12567 bp += strlen(bp);
12569 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
12570 bp += strlen(bp);
12573 (void) sprintf(bp, "\n\nResponse Queue");
12574 bp += strlen(bp);
12575 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
12576 if (cnt % 8 == 0) {
12577 (void) sprintf(bp, "\n%08x: ", cnt);
12578 bp += strlen(bp);
12580 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
12581 bp += strlen(bp);
12584 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
12585 (ha->fwexttracebuf.bp != NULL)) {
12586 uint32_t cnt_b = 0;
12587 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
12589 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
12590 bp += strlen(bp);
12591 /* show data address as a byte address, data as long words */
12592 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
12593 cnt_b = cnt * 4;
12594 if (cnt_b % 32 == 0) {
12595 (void) sprintf(bp, "\n%08x: ",
12596 (int)(w64 + cnt_b));
12597 bp += 11;
12599 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
12600 bp += 9;
12604 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
12605 (ha->fwfcetracebuf.bp != NULL)) {
12606 uint32_t cnt_b = 0;
12607 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
12609 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
12610 bp += strlen(bp);
12611 /* show data address as a byte address, data as long words */
12612 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
12613 cnt_b = cnt * 4;
12614 if (cnt_b % 32 == 0) {
12615 (void) sprintf(bp, "\n%08x: ",
12616 (int)(w64 + cnt_b));
12617 bp += 11;
12619 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
12620 bp += 9;
12624 (void) sprintf(bp, "\n\n");
12625 bp += strlen(bp);
12627 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
12629 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
12631 return (cnt);
12635 * ql_2581_ascii_fw_dump
12636 * Converts ISP25xx or ISP81xx firmware binary dump to ascii.
12638 * Input:
12639 * ha = adapter state pointer.
12640 * bptr = buffer pointer.
12642 * Returns:
12643 * Amount of data buffer used.
12645 * Context:
12646 * Kernel context.
12648 static size_t
12649 ql_2581_ascii_fw_dump(ql_adapter_state_t *ha, caddr_t bufp)
12651 uint32_t cnt;
12652 uint32_t cnt1;
12653 caddr_t bp = bufp;
12654 ql_25xx_fw_dump_t *fw = ha->ql_dump_ptr;
12656 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
12658 (void) sprintf(bp, "\nISP FW Version %d.%02d.%02d Attributes %X\n",
12659 ha->fw_major_version, ha->fw_minor_version,
12660 ha->fw_subminor_version, ha->fw_attributes);
12661 bp += strlen(bp);
12663 (void) sprintf(bp, "\nR2H Status Register\n%08x\n", fw->r2h_status);
12664 bp += strlen(bp);
12666 (void) sprintf(bp, "\nHostRisc Registers");
12667 bp += strlen(bp);
12668 for (cnt = 0; cnt < sizeof (fw->hostrisc_reg) / 4; cnt++) {
12669 if (cnt % 8 == 0) {
12670 (void) sprintf(bp++, "\n");
12672 (void) sprintf(bp, "%08x ", fw->hostrisc_reg[cnt]);
12673 bp += 9;
12676 (void) sprintf(bp, "\n\nPCIe Registers");
12677 bp += strlen(bp);
12678 for (cnt = 0; cnt < sizeof (fw->pcie_reg) / 4; cnt++) {
12679 if (cnt % 8 == 0) {
12680 (void) sprintf(bp++, "\n");
12682 (void) sprintf(bp, "%08x ", fw->pcie_reg[cnt]);
12683 bp += 9;
12686 (void) strcat(bp, "\n\nHost Interface Registers");
12687 bp += strlen(bp);
12688 for (cnt = 0; cnt < sizeof (fw->host_reg) / 4; cnt++) {
12689 if (cnt % 8 == 0) {
12690 (void) sprintf(bp++, "\n");
12692 (void) sprintf(bp, "%08x ", fw->host_reg[cnt]);
12693 bp += 9;
12696 (void) sprintf(bufp + strlen(bufp), "\n\nShadow Registers");
12697 bp += strlen(bp);
12698 for (cnt = 0; cnt < sizeof (fw->shadow_reg) / 4; cnt++) {
12699 if (cnt % 8 == 0) {
12700 (void) sprintf(bp++, "\n");
12702 (void) sprintf(bp, "%08x ", fw->shadow_reg[cnt]);
12703 bp += 9;
12706 (void) sprintf(bufp + strlen(bufp), "\n\nRISC IO Register\n%08x",
12707 fw->risc_io);
12708 bp += strlen(bp);
12710 (void) sprintf(bp, "\n\nMailbox Registers");
12711 bp += strlen(bp);
12712 for (cnt = 0; cnt < sizeof (fw->mailbox_reg) / 2; cnt++) {
12713 if (cnt % 16 == 0) {
12714 (void) sprintf(bp++, "\n");
12716 (void) sprintf(bp, "%04x ", fw->mailbox_reg[cnt]);
12717 bp += 5;
12720 (void) sprintf(bp, "\n\nXSEQ GP Registers");
12721 bp += strlen(bp);
12722 for (cnt = 0; cnt < sizeof (fw->xseq_gp_reg) / 4; cnt++) {
12723 if (cnt % 8 == 0) {
12724 (void) sprintf(bp++, "\n");
12726 (void) sprintf(bp, "%08x ", fw->xseq_gp_reg[cnt]);
12727 bp += 9;
12730 (void) sprintf(bp, "\n\nXSEQ-0 Registers");
12731 bp += strlen(bp);
12732 for (cnt = 0; cnt < sizeof (fw->xseq_0_reg) / 4; cnt++) {
12733 if (cnt % 8 == 0) {
12734 (void) sprintf(bp++, "\n");
12736 (void) sprintf(bp, "%08x ", fw->xseq_0_reg[cnt]);
12737 bp += 9;
12740 (void) sprintf(bp, "\n\nXSEQ-1 Registers");
12741 bp += strlen(bp);
12742 for (cnt = 0; cnt < sizeof (fw->xseq_1_reg) / 4; cnt++) {
12743 if (cnt % 8 == 0) {
12744 (void) sprintf(bp++, "\n");
12746 (void) sprintf(bp, "%08x ", fw->xseq_1_reg[cnt]);
12747 bp += 9;
12750 (void) sprintf(bp, "\n\nRSEQ GP Registers");
12751 bp += strlen(bp);
12752 for (cnt = 0; cnt < sizeof (fw->rseq_gp_reg) / 4; cnt++) {
12753 if (cnt % 8 == 0) {
12754 (void) sprintf(bp++, "\n");
12756 (void) sprintf(bp, "%08x ", fw->rseq_gp_reg[cnt]);
12757 bp += 9;
12760 (void) sprintf(bp, "\n\nRSEQ-0 Registers");
12761 bp += strlen(bp);
12762 for (cnt = 0; cnt < sizeof (fw->rseq_0_reg) / 4; cnt++) {
12763 if (cnt % 8 == 0) {
12764 (void) sprintf(bp++, "\n");
12766 (void) sprintf(bp, "%08x ", fw->rseq_0_reg[cnt]);
12767 bp += 9;
12770 (void) sprintf(bp, "\n\nRSEQ-1 Registers");
12771 bp += strlen(bp);
12772 for (cnt = 0; cnt < sizeof (fw->rseq_1_reg) / 4; cnt++) {
12773 if (cnt % 8 == 0) {
12774 (void) sprintf(bp++, "\n");
12776 (void) sprintf(bp, "%08x ", fw->rseq_1_reg[cnt]);
12777 bp += 9;
12780 (void) sprintf(bp, "\n\nRSEQ-2 Registers");
12781 bp += strlen(bp);
12782 for (cnt = 0; cnt < sizeof (fw->rseq_2_reg) / 4; cnt++) {
12783 if (cnt % 8 == 0) {
12784 (void) sprintf(bp++, "\n");
12786 (void) sprintf(bp, "%08x ", fw->rseq_2_reg[cnt]);
12787 bp += 9;
12790 (void) sprintf(bp, "\n\nASEQ GP Registers");
12791 bp += strlen(bp);
12792 for (cnt = 0; cnt < sizeof (fw->aseq_gp_reg) / 4; cnt++) {
12793 if (cnt % 8 == 0) {
12794 (void) sprintf(bp++, "\n");
12796 (void) sprintf(bp, "%08x ", fw->aseq_gp_reg[cnt]);
12797 bp += 9;
12800 (void) sprintf(bp, "\n\nASEQ-0 Registers");
12801 bp += strlen(bp);
12802 for (cnt = 0; cnt < sizeof (fw->aseq_0_reg) / 4; cnt++) {
12803 if (cnt % 8 == 0) {
12804 (void) sprintf(bp++, "\n");
12806 (void) sprintf(bp, "%08x ", fw->aseq_0_reg[cnt]);
12807 bp += 9;
12810 (void) sprintf(bp, "\n\nASEQ-1 Registers");
12811 bp += strlen(bp);
12812 for (cnt = 0; cnt < sizeof (fw->aseq_1_reg) / 4; cnt++) {
12813 if (cnt % 8 == 0) {
12814 (void) sprintf(bp++, "\n");
12816 (void) sprintf(bp, "%08x ", fw->aseq_1_reg[cnt]);
12817 bp += 9;
12820 (void) sprintf(bp, "\n\nASEQ-2 Registers");
12821 bp += strlen(bp);
12822 for (cnt = 0; cnt < sizeof (fw->aseq_2_reg) / 4; cnt++) {
12823 if (cnt % 8 == 0) {
12824 (void) sprintf(bp++, "\n");
12826 (void) sprintf(bp, "%08x ", fw->aseq_2_reg[cnt]);
12827 bp += 9;
12830 (void) sprintf(bp, "\n\nCommand DMA Registers");
12831 bp += strlen(bp);
12832 for (cnt = 0; cnt < sizeof (fw->cmd_dma_reg) / 4; cnt++) {
12833 if (cnt % 8 == 0) {
12834 (void) sprintf(bp++, "\n");
12836 (void) sprintf(bp, "%08x ", fw->cmd_dma_reg[cnt]);
12837 bp += 9;
12840 (void) sprintf(bp, "\n\nRequest0 Queue DMA Channel Registers");
12841 bp += strlen(bp);
12842 for (cnt = 0; cnt < sizeof (fw->req0_dma_reg) / 4; cnt++) {
12843 if (cnt % 8 == 0) {
12844 (void) sprintf(bp++, "\n");
12846 (void) sprintf(bp, "%08x ", fw->req0_dma_reg[cnt]);
12847 bp += 9;
12850 (void) sprintf(bp, "\n\nResponse0 Queue DMA Channel Registers");
12851 bp += strlen(bp);
12852 for (cnt = 0; cnt < sizeof (fw->resp0_dma_reg) / 4; cnt++) {
12853 if (cnt % 8 == 0) {
12854 (void) sprintf(bp++, "\n");
12856 (void) sprintf(bp, "%08x ", fw->resp0_dma_reg[cnt]);
12857 bp += 9;
12860 (void) sprintf(bp, "\n\nRequest1 Queue DMA Channel Registers");
12861 bp += strlen(bp);
12862 for (cnt = 0; cnt < sizeof (fw->req1_dma_reg) / 4; cnt++) {
12863 if (cnt % 8 == 0) {
12864 (void) sprintf(bp++, "\n");
12866 (void) sprintf(bp, "%08x ", fw->req1_dma_reg[cnt]);
12867 bp += 9;
12870 (void) sprintf(bp, "\n\nXMT0 Data DMA Registers");
12871 bp += strlen(bp);
12872 for (cnt = 0; cnt < sizeof (fw->xmt0_dma_reg) / 4; cnt++) {
12873 if (cnt % 8 == 0) {
12874 (void) sprintf(bp++, "\n");
12876 (void) sprintf(bp, "%08x ", fw->xmt0_dma_reg[cnt]);
12877 bp += 9;
12880 (void) sprintf(bp, "\n\nXMT1 Data DMA Registers");
12881 bp += strlen(bp);
12882 for (cnt = 0; cnt < sizeof (fw->xmt1_dma_reg) / 4; cnt++) {
12883 if (cnt % 8 == 0) {
12884 (void) sprintf(bp++, "\n");
12886 (void) sprintf(bp, "%08x ", fw->xmt1_dma_reg[cnt]);
12887 bp += 9;
12890 (void) sprintf(bp, "\n\nXMT2 Data DMA Registers");
12891 bp += strlen(bp);
12892 for (cnt = 0; cnt < sizeof (fw->xmt2_dma_reg) / 4; cnt++) {
12893 if (cnt % 8 == 0) {
12894 (void) sprintf(bp++, "\n");
12896 (void) sprintf(bp, "%08x ", fw->xmt2_dma_reg[cnt]);
12897 bp += 9;
12900 (void) sprintf(bp, "\n\nXMT3 Data DMA Registers");
12901 bp += strlen(bp);
12902 for (cnt = 0; cnt < sizeof (fw->xmt3_dma_reg) / 4; cnt++) {
12903 if (cnt % 8 == 0) {
12904 (void) sprintf(bp++, "\n");
12906 (void) sprintf(bp, "%08x ", fw->xmt3_dma_reg[cnt]);
12907 bp += 9;
12910 (void) sprintf(bp, "\n\nXMT4 Data DMA Registers");
12911 bp += strlen(bp);
12912 for (cnt = 0; cnt < sizeof (fw->xmt4_dma_reg) / 4; cnt++) {
12913 if (cnt % 8 == 0) {
12914 (void) sprintf(bp++, "\n");
12916 (void) sprintf(bp, "%08x ", fw->xmt4_dma_reg[cnt]);
12917 bp += 9;
12920 (void) sprintf(bp, "\n\nXMT Data DMA Common Registers");
12921 bp += strlen(bp);
12922 for (cnt = 0; cnt < sizeof (fw->xmt_data_dma_reg) / 4; cnt++) {
12923 if (cnt % 8 == 0) {
12924 (void) sprintf(bp++, "\n");
12926 (void) sprintf(bp, "%08x ", fw->xmt_data_dma_reg[cnt]);
12927 bp += 9;
12930 (void) sprintf(bp, "\n\nRCV Thread 0 Data DMA Registers");
12931 bp += strlen(bp);
12932 for (cnt = 0; cnt < sizeof (fw->rcvt0_data_dma_reg) / 4; cnt++) {
12933 if (cnt % 8 == 0) {
12934 (void) sprintf(bp++, "\n");
12936 (void) sprintf(bp, "%08x ", fw->rcvt0_data_dma_reg[cnt]);
12937 bp += 9;
12940 (void) sprintf(bp, "\n\nRCV Thread 1 Data DMA Registers");
12941 bp += strlen(bp);
12942 for (cnt = 0; cnt < sizeof (fw->rcvt1_data_dma_reg) / 4; cnt++) {
12943 if (cnt % 8 == 0) {
12944 (void) sprintf(bp++, "\n");
12946 (void) sprintf(bp, "%08x ", fw->rcvt1_data_dma_reg[cnt]);
12947 bp += 9;
12950 (void) sprintf(bp, "\n\nRISC GP Registers");
12951 bp += strlen(bp);
12952 for (cnt = 0; cnt < sizeof (fw->risc_gp_reg) / 4; cnt++) {
12953 if (cnt % 8 == 0) {
12954 (void) sprintf(bp++, "\n");
12956 (void) sprintf(bp, "%08x ", fw->risc_gp_reg[cnt]);
12957 bp += 9;
12960 (void) sprintf(bp, "\n\nLMC Registers");
12961 bp += strlen(bp);
12962 for (cnt = 0; cnt < sizeof (fw->lmc_reg) / 4; cnt++) {
12963 if (cnt % 8 == 0) {
12964 (void) sprintf(bp++, "\n");
12966 (void) sprintf(bp, "%08x ", fw->lmc_reg[cnt]);
12967 bp += 9;
12970 (void) sprintf(bp, "\n\nFPM Hardware Registers");
12971 bp += strlen(bp);
12972 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12973 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fpm_hdw_reg)) :
12974 (uint32_t)(sizeof (fw->fpm_hdw_reg));
12975 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12976 if (cnt % 8 == 0) {
12977 (void) sprintf(bp++, "\n");
12979 (void) sprintf(bp, "%08x ", fw->fpm_hdw_reg[cnt]);
12980 bp += 9;
12983 (void) sprintf(bp, "\n\nFB Hardware Registers");
12984 bp += strlen(bp);
12985 cnt1 = CFG_IST(ha, CFG_CTRL_81XX) ?
12986 (uint32_t)(sizeof (((ql_81xx_fw_dump_t *)(fw))->fb_hdw_reg)) :
12987 (uint32_t)(sizeof (fw->fb_hdw_reg));
12988 for (cnt = 0; cnt < cnt1 / 4; cnt++) {
12989 if (cnt % 8 == 0) {
12990 (void) sprintf(bp++, "\n");
12992 (void) sprintf(bp, "%08x ", fw->fb_hdw_reg[cnt]);
12993 bp += 9;
12996 (void) sprintf(bp, "\n\nCode RAM");
12997 bp += strlen(bp);
12998 for (cnt = 0; cnt < sizeof (fw->code_ram) / 4; cnt++) {
12999 if (cnt % 8 == 0) {
13000 (void) sprintf(bp, "\n%08x: ", cnt + 0x20000);
13001 bp += 11;
13003 (void) sprintf(bp, "%08x ", fw->code_ram[cnt]);
13004 bp += 9;
13007 (void) sprintf(bp, "\n\nExternal Memory");
13008 bp += strlen(bp);
13009 for (cnt = 0; cnt < ha->fw_ext_memory_size / 4; cnt++) {
13010 if (cnt % 8 == 0) {
13011 (void) sprintf(bp, "\n%08x: ", cnt + 0x100000);
13012 bp += 11;
13014 (void) sprintf(bp, "%08x ", fw->ext_mem[cnt]);
13015 bp += 9;
13018 (void) sprintf(bp, "\n[<==END] ISP Debug Dump");
13019 bp += strlen(bp);
13021 (void) sprintf(bp, "\n\nRequest Queue");
13022 bp += strlen(bp);
13023 for (cnt = 0; cnt < REQUEST_QUEUE_SIZE / 4; cnt++) {
13024 if (cnt % 8 == 0) {
13025 (void) sprintf(bp, "\n%08x: ", cnt);
13026 bp += strlen(bp);
13028 (void) sprintf(bp, "%08x ", fw->req_q[cnt]);
13029 bp += strlen(bp);
13032 (void) sprintf(bp, "\n\nResponse Queue");
13033 bp += strlen(bp);
13034 for (cnt = 0; cnt < RESPONSE_QUEUE_SIZE / 4; cnt++) {
13035 if (cnt % 8 == 0) {
13036 (void) sprintf(bp, "\n%08x: ", cnt);
13037 bp += strlen(bp);
13039 (void) sprintf(bp, "%08x ", fw->rsp_q[cnt]);
13040 bp += strlen(bp);
13043 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13044 (ha->fwexttracebuf.bp != NULL)) {
13045 uint32_t cnt_b = 0;
13046 uint64_t w64 = (uintptr_t)ha->fwexttracebuf.bp;
13048 (void) sprintf(bp, "\n\nExtended Trace Buffer Memory");
13049 bp += strlen(bp);
13050 /* show data address as a byte address, data as long words */
13051 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13052 cnt_b = cnt * 4;
13053 if (cnt_b % 32 == 0) {
13054 (void) sprintf(bp, "\n%08x: ",
13055 (int)(w64 + cnt_b));
13056 bp += 11;
13058 (void) sprintf(bp, "%08x ", fw->ext_trace_buf[cnt]);
13059 bp += 9;
13063 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13064 (ha->fwfcetracebuf.bp != NULL)) {
13065 uint32_t cnt_b = 0;
13066 uint64_t w64 = (uintptr_t)ha->fwfcetracebuf.bp;
13068 (void) sprintf(bp, "\n\nFC Event Trace Buffer Memory");
13069 bp += strlen(bp);
13070 /* show data address as a byte address, data as long words */
13071 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13072 cnt_b = cnt * 4;
13073 if (cnt_b % 32 == 0) {
13074 (void) sprintf(bp, "\n%08x: ",
13075 (int)(w64 + cnt_b));
13076 bp += 11;
13078 (void) sprintf(bp, "%08x ", fw->fce_trace_buf[cnt]);
13079 bp += 9;
13083 (void) sprintf(bp, "\n\n");
13084 bp += strlen(bp);
13086 cnt = (uint32_t)((uintptr_t)bp - (uintptr_t)bufp);
13088 QL_PRINT_3(CE_CONT, "(%d): done=%xh\n", ha->instance, cnt);
13090 return (cnt);
13094 * ql_2200_binary_fw_dump
13096 * Input:
13097 * ha: adapter state pointer.
13098 * fw: firmware dump context pointer.
13100 * Returns:
13101 * ql local function return status code.
13103 * Context:
13104 * Interrupt or Kernel context, no mailbox commands allowed.
13106 static int
13107 ql_2200_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13109 uint32_t cnt;
13110 uint16_t risc_address;
13111 clock_t timer;
13112 mbx_cmd_t mc;
13113 mbx_cmd_t *mcp = &mc;
13114 int rval = QL_SUCCESS;
13116 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13118 /* Disable ISP interrupts. */
13119 WRT16_IO_REG(ha, ictrl, 0);
13120 ADAPTER_STATE_LOCK(ha);
13121 ha->flags &= ~INTERRUPTS_ENABLED;
13122 ADAPTER_STATE_UNLOCK(ha);
13124 /* Release mailbox registers. */
13125 WRT16_IO_REG(ha, semaphore, 0);
13127 /* Pause RISC. */
13128 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13129 timer = 30000;
13130 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13131 if (timer-- != 0) {
13132 drv_usecwait(MILLISEC);
13133 } else {
13134 rval = QL_FUNCTION_TIMEOUT;
13135 break;
13139 if (rval == QL_SUCCESS) {
13140 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13141 sizeof (fw->pbiu_reg) / 2, 16);
13143 /* In 2200 we only read 8 mailboxes */
13144 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x10,
13145 8, 16);
13147 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x20,
13148 sizeof (fw->dma_reg) / 2, 16);
13150 WRT16_IO_REG(ha, ctrl_status, 0);
13151 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13152 sizeof (fw->risc_hdw_reg) / 2, 16);
13154 WRT16_IO_REG(ha, pcr, 0x2000);
13155 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13156 sizeof (fw->risc_gp0_reg) / 2, 16);
13158 WRT16_IO_REG(ha, pcr, 0x2100);
13159 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13160 sizeof (fw->risc_gp1_reg) / 2, 16);
13162 WRT16_IO_REG(ha, pcr, 0x2200);
13163 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13164 sizeof (fw->risc_gp2_reg) / 2, 16);
13166 WRT16_IO_REG(ha, pcr, 0x2300);
13167 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13168 sizeof (fw->risc_gp3_reg) / 2, 16);
13170 WRT16_IO_REG(ha, pcr, 0x2400);
13171 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13172 sizeof (fw->risc_gp4_reg) / 2, 16);
13174 WRT16_IO_REG(ha, pcr, 0x2500);
13175 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13176 sizeof (fw->risc_gp5_reg) / 2, 16);
13178 WRT16_IO_REG(ha, pcr, 0x2600);
13179 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13180 sizeof (fw->risc_gp6_reg) / 2, 16);
13182 WRT16_IO_REG(ha, pcr, 0x2700);
13183 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13184 sizeof (fw->risc_gp7_reg) / 2, 16);
13186 WRT16_IO_REG(ha, ctrl_status, 0x10);
13187 /* 2200 has only 16 registers */
13188 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13189 ha->iobase + 0x80, 16, 16);
13191 WRT16_IO_REG(ha, ctrl_status, 0x20);
13192 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13193 sizeof (fw->fpm_b0_reg) / 2, 16);
13195 WRT16_IO_REG(ha, ctrl_status, 0x30);
13196 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13197 sizeof (fw->fpm_b1_reg) / 2, 16);
13199 /* Select FPM registers. */
13200 WRT16_IO_REG(ha, ctrl_status, 0x20);
13202 /* FPM Soft Reset. */
13203 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13205 /* Select frame buffer registers. */
13206 WRT16_IO_REG(ha, ctrl_status, 0x10);
13208 /* Reset frame buffer FIFOs. */
13209 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13211 /* Select RISC module registers. */
13212 WRT16_IO_REG(ha, ctrl_status, 0);
13214 /* Reset RISC module. */
13215 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13217 /* Reset ISP semaphore. */
13218 WRT16_IO_REG(ha, semaphore, 0);
13220 /* Release RISC module. */
13221 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13223 /* Wait for RISC to recover from reset. */
13224 timer = 30000;
13225 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13226 if (timer-- != 0) {
13227 drv_usecwait(MILLISEC);
13228 } else {
13229 rval = QL_FUNCTION_TIMEOUT;
13230 break;
13234 /* Disable RISC pause on FPM parity error. */
13235 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13238 if (rval == QL_SUCCESS) {
13239 /* Pause RISC. */
13240 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13241 timer = 30000;
13242 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13243 if (timer-- != 0) {
13244 drv_usecwait(MILLISEC);
13245 } else {
13246 rval = QL_FUNCTION_TIMEOUT;
13247 break;
13252 if (rval == QL_SUCCESS) {
13253 /* Set memory configuration and timing. */
13254 WRT16_IO_REG(ha, mctr, 0xf2);
13256 /* Release RISC. */
13257 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13259 /* Get RISC SRAM. */
13260 risc_address = 0x1000;
13261 WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_WORD);
13262 for (cnt = 0; cnt < 0xf000; cnt++) {
13263 WRT16_IO_REG(ha, mailbox_in[1], risc_address++);
13264 WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
13265 for (timer = 6000000; timer != 0; timer--) {
13266 /* Check for pending interrupts. */
13267 if (INTERRUPT_PENDING(ha)) {
13268 if (RD16_IO_REG(ha, semaphore) &
13269 BIT_0) {
13270 WRT16_IO_REG(ha, hccr,
13271 HC_CLR_RISC_INT);
13272 mcp->mb[0] = RD16_IO_REG(ha,
13273 mailbox_out[0]);
13274 fw->risc_ram[cnt] =
13275 RD16_IO_REG(ha,
13276 mailbox_out[2]);
13277 WRT16_IO_REG(ha,
13278 semaphore, 0);
13279 break;
13281 WRT16_IO_REG(ha, hccr,
13282 HC_CLR_RISC_INT);
13284 drv_usecwait(5);
13287 if (timer == 0) {
13288 rval = QL_FUNCTION_TIMEOUT;
13289 } else {
13290 rval = mcp->mb[0];
13293 if (rval != QL_SUCCESS) {
13294 break;
13299 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13301 return (rval);
13305 * ql_2300_binary_fw_dump
13307 * Input:
13308 * ha: adapter state pointer.
13309 * fw: firmware dump context pointer.
13311 * Returns:
13312 * ql local function return status code.
13314 * Context:
13315 * Interrupt or Kernel context, no mailbox commands allowed.
13317 static int
13318 ql_2300_binary_fw_dump(ql_adapter_state_t *ha, ql_fw_dump_t *fw)
13320 clock_t timer;
13321 int rval = QL_SUCCESS;
13323 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13325 /* Disable ISP interrupts. */
13326 WRT16_IO_REG(ha, ictrl, 0);
13327 ADAPTER_STATE_LOCK(ha);
13328 ha->flags &= ~INTERRUPTS_ENABLED;
13329 ADAPTER_STATE_UNLOCK(ha);
13331 /* Release mailbox registers. */
13332 WRT16_IO_REG(ha, semaphore, 0);
13334 /* Pause RISC. */
13335 WRT16_IO_REG(ha, hccr, HC_PAUSE_RISC);
13336 timer = 30000;
13337 while ((RD16_IO_REG(ha, hccr) & HC_RISC_PAUSE) == 0) {
13338 if (timer-- != 0) {
13339 drv_usecwait(MILLISEC);
13340 } else {
13341 rval = QL_FUNCTION_TIMEOUT;
13342 break;
13346 if (rval == QL_SUCCESS) {
13347 (void) ql_read_regs(ha, fw->pbiu_reg, ha->iobase,
13348 sizeof (fw->pbiu_reg) / 2, 16);
13350 (void) ql_read_regs(ha, fw->risc_host_reg, ha->iobase + 0x10,
13351 sizeof (fw->risc_host_reg) / 2, 16);
13353 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x40,
13354 sizeof (fw->mailbox_reg) / 2, 16);
13356 WRT16_IO_REG(ha, ctrl_status, 0x40);
13357 (void) ql_read_regs(ha, fw->resp_dma_reg, ha->iobase + 0x80,
13358 sizeof (fw->resp_dma_reg) / 2, 16);
13360 WRT16_IO_REG(ha, ctrl_status, 0x50);
13361 (void) ql_read_regs(ha, fw->dma_reg, ha->iobase + 0x80,
13362 sizeof (fw->dma_reg) / 2, 16);
13364 WRT16_IO_REG(ha, ctrl_status, 0);
13365 (void) ql_read_regs(ha, fw->risc_hdw_reg, ha->iobase + 0xA0,
13366 sizeof (fw->risc_hdw_reg) / 2, 16);
13368 WRT16_IO_REG(ha, pcr, 0x2000);
13369 (void) ql_read_regs(ha, fw->risc_gp0_reg, ha->iobase + 0x80,
13370 sizeof (fw->risc_gp0_reg) / 2, 16);
13372 WRT16_IO_REG(ha, pcr, 0x2200);
13373 (void) ql_read_regs(ha, fw->risc_gp1_reg, ha->iobase + 0x80,
13374 sizeof (fw->risc_gp1_reg) / 2, 16);
13376 WRT16_IO_REG(ha, pcr, 0x2400);
13377 (void) ql_read_regs(ha, fw->risc_gp2_reg, ha->iobase + 0x80,
13378 sizeof (fw->risc_gp2_reg) / 2, 16);
13380 WRT16_IO_REG(ha, pcr, 0x2600);
13381 (void) ql_read_regs(ha, fw->risc_gp3_reg, ha->iobase + 0x80,
13382 sizeof (fw->risc_gp3_reg) / 2, 16);
13384 WRT16_IO_REG(ha, pcr, 0x2800);
13385 (void) ql_read_regs(ha, fw->risc_gp4_reg, ha->iobase + 0x80,
13386 sizeof (fw->risc_gp4_reg) / 2, 16);
13388 WRT16_IO_REG(ha, pcr, 0x2A00);
13389 (void) ql_read_regs(ha, fw->risc_gp5_reg, ha->iobase + 0x80,
13390 sizeof (fw->risc_gp5_reg) / 2, 16);
13392 WRT16_IO_REG(ha, pcr, 0x2C00);
13393 (void) ql_read_regs(ha, fw->risc_gp6_reg, ha->iobase + 0x80,
13394 sizeof (fw->risc_gp6_reg) / 2, 16);
13396 WRT16_IO_REG(ha, pcr, 0x2E00);
13397 (void) ql_read_regs(ha, fw->risc_gp7_reg, ha->iobase + 0x80,
13398 sizeof (fw->risc_gp7_reg) / 2, 16);
13400 WRT16_IO_REG(ha, ctrl_status, 0x10);
13401 (void) ql_read_regs(ha, fw->frame_buf_hdw_reg,
13402 ha->iobase + 0x80, sizeof (fw->frame_buf_hdw_reg) / 2, 16);
13404 WRT16_IO_REG(ha, ctrl_status, 0x20);
13405 (void) ql_read_regs(ha, fw->fpm_b0_reg, ha->iobase + 0x80,
13406 sizeof (fw->fpm_b0_reg) / 2, 16);
13408 WRT16_IO_REG(ha, ctrl_status, 0x30);
13409 (void) ql_read_regs(ha, fw->fpm_b1_reg, ha->iobase + 0x80,
13410 sizeof (fw->fpm_b1_reg) / 2, 16);
13412 /* Select FPM registers. */
13413 WRT16_IO_REG(ha, ctrl_status, 0x20);
13415 /* FPM Soft Reset. */
13416 WRT16_IO_REG(ha, fpm_diag_config, 0x100);
13418 /* Select frame buffer registers. */
13419 WRT16_IO_REG(ha, ctrl_status, 0x10);
13421 /* Reset frame buffer FIFOs. */
13422 WRT16_IO_REG(ha, fb_cmd, 0xa000);
13424 /* Select RISC module registers. */
13425 WRT16_IO_REG(ha, ctrl_status, 0);
13427 /* Reset RISC module. */
13428 WRT16_IO_REG(ha, hccr, HC_RESET_RISC);
13430 /* Reset ISP semaphore. */
13431 WRT16_IO_REG(ha, semaphore, 0);
13433 /* Release RISC module. */
13434 WRT16_IO_REG(ha, hccr, HC_RELEASE_RISC);
13436 /* Wait for RISC to recover from reset. */
13437 timer = 30000;
13438 while (RD16_IO_REG(ha, mailbox_out[0]) == MBS_BUSY) {
13439 if (timer-- != 0) {
13440 drv_usecwait(MILLISEC);
13441 } else {
13442 rval = QL_FUNCTION_TIMEOUT;
13443 break;
13447 /* Disable RISC pause on FPM parity error. */
13448 WRT16_IO_REG(ha, hccr, HC_DISABLE_PARITY_PAUSE);
13451 /* Get RISC SRAM. */
13452 if (rval == QL_SUCCESS) {
13453 rval = ql_read_risc_ram(ha, 0x800, 0xf800, fw->risc_ram);
13455 /* Get STACK SRAM. */
13456 if (rval == QL_SUCCESS) {
13457 rval = ql_read_risc_ram(ha, 0x10000, 0x800, fw->stack_ram);
13459 /* Get DATA SRAM. */
13460 if (rval == QL_SUCCESS) {
13461 rval = ql_read_risc_ram(ha, 0x10800, 0xf800, fw->data_ram);
13464 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13466 return (rval);
13470 * ql_24xx_binary_fw_dump
13472 * Input:
13473 * ha: adapter state pointer.
13474 * fw: firmware dump context pointer.
13476 * Returns:
13477 * ql local function return status code.
13479 * Context:
13480 * Interrupt or Kernel context, no mailbox commands allowed.
13482 static int
13483 ql_24xx_binary_fw_dump(ql_adapter_state_t *ha, ql_24xx_fw_dump_t *fw)
/*
 * Capture a full ISP24xx firmware state dump into *fw: pause the
 * RISC, snapshot the host, shadow, mailbox, sequencer, DMA, RISC,
 * local-memory-controller, FPM and frame-buffer register banks,
 * copy the request/response queues, then reset the chip and read
 * code RAM, external memory and (when enabled) the firmware
 * extended-trace and FC-event-trace buffers.  Returns QL_SUCCESS
 * or QL_FUNCTION_TIMEOUT if the RISC never reports paused.
 *
 * NOTE(review): this listing came from a blob view that dropped
 * blank and brace-only lines (the embedded original line numbers
 * skip ranges); the leading numbers on each line are extraction
 * artifacts, not code, and several closing braces are not visible
 * here — compare against the original ql_api.c before editing.
 */
13485 uint32_t *reg32;
13486 void *bp;
13487 clock_t timer;
13488 int rval = QL_SUCCESS;
13490 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
/* Save the current HCCR contents for the dump before pausing. */
13492 fw->hccr = RD32_IO_REG(ha, hccr);
13494 /* Pause RISC. */
13495 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13496 /* Disable ISP interrupts. */
13497 WRT16_IO_REG(ha, ictrl, 0);
13499 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
/* Poll up to 30000 * 100us (~3s) for the RISC-paused status bit. */
13500 for (timer = 30000;
13501 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13502 rval == QL_SUCCESS; timer--) {
13503 if (timer) {
13504 drv_usecwait(100);
13505 } else {
13506 rval = QL_FUNCTION_TIMEOUT;
13511 if (rval == QL_SUCCESS) {
13512 /* Host interface registers. */
13513 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13514 sizeof (fw->host_reg) / 4, 32);
13516 /* Disable ISP interrupts. */
13517 WRT32_IO_REG(ha, ictrl, 0);
13518 RD32_IO_REG(ha, ictrl);
13519 ADAPTER_STATE_LOCK(ha);
13520 ha->flags &= ~INTERRUPTS_ENABLED;
13521 ADAPTER_STATE_UNLOCK(ha);
13523 /* Shadow registers. */
13525 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13526 RD32_IO_REG(ha, io_base_addr);
/*
 * Each shadow register is read by writing a select value
 * (0xB0n00000) to offset 0xF0 and reading back from 0xFC.
 */
13528 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13529 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13530 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13531 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13533 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13534 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13535 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13536 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13538 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13539 WRT_REG_DWORD(ha, reg32, 0xB0200000);
13540 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13541 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
13543 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13544 WRT_REG_DWORD(ha, reg32, 0xB0300000);
13545 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13546 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
13548 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13549 WRT_REG_DWORD(ha, reg32, 0xB0400000);
13550 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13551 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
13553 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13554 WRT_REG_DWORD(ha, reg32, 0xB0500000);
13555 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13556 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
13558 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13559 WRT_REG_DWORD(ha, reg32, 0xB0600000);
13560 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13561 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
13563 /* Mailbox registers. */
13564 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
13565 sizeof (fw->mailbox_reg) / 2, 16);
13567 /* Transfer sequence registers. */
13569 /* XSEQ GP */
13570 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
13571 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
13572 16, 32);
13573 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
13574 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13575 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
13576 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13577 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
13578 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13579 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
13580 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13581 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
13582 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13583 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
13584 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13585 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
13586 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13588 /* XSEQ-0 */
13589 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
13590 (void) ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
13591 sizeof (fw->xseq_0_reg) / 4, 32);
13593 /* XSEQ-1 */
13594 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
13595 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
13596 sizeof (fw->xseq_1_reg) / 4, 32);
13598 /* Receive sequence registers. */
13600 /* RSEQ GP */
13601 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
13602 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
13603 16, 32);
13604 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
13605 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13606 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
13607 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13608 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
13609 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13610 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
13611 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13612 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
13613 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13614 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
13615 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13616 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
13617 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13619 /* RSEQ-0 */
13620 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
13621 (void) ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
13622 sizeof (fw->rseq_0_reg) / 4, 32);
13624 /* RSEQ-1 */
13625 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
13626 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
13627 sizeof (fw->rseq_1_reg) / 4, 32);
13629 /* RSEQ-2 */
13630 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
13631 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
13632 sizeof (fw->rseq_2_reg) / 4, 32);
13634 /* Command DMA registers. */
13636 WRT32_IO_REG(ha, io_base_addr, 0x7100);
13637 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
13638 sizeof (fw->cmd_dma_reg) / 4, 32);
13640 /* Queues. */
13642 /* RequestQ0 */
13643 WRT32_IO_REG(ha, io_base_addr, 0x7200);
13644 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
13645 8, 32);
13646 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13648 /* ResponseQ0 */
13649 WRT32_IO_REG(ha, io_base_addr, 0x7300);
13650 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
13651 8, 32);
13652 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13654 /* RequestQ1 */
13655 WRT32_IO_REG(ha, io_base_addr, 0x7400);
13656 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
13657 8, 32);
13658 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
13660 /* Transmit DMA registers. */
13662 /* XMT0 */
13663 WRT32_IO_REG(ha, io_base_addr, 0x7600);
13664 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
13665 16, 32);
13666 WRT32_IO_REG(ha, io_base_addr, 0x7610);
13667 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13669 /* XMT1 */
13670 WRT32_IO_REG(ha, io_base_addr, 0x7620);
13671 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
13672 16, 32);
13673 WRT32_IO_REG(ha, io_base_addr, 0x7630);
13674 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13676 /* XMT2 */
13677 WRT32_IO_REG(ha, io_base_addr, 0x7640);
13678 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
13679 16, 32);
13680 WRT32_IO_REG(ha, io_base_addr, 0x7650);
13681 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13683 /* XMT3 */
13684 WRT32_IO_REG(ha, io_base_addr, 0x7660);
13685 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
13686 16, 32);
13687 WRT32_IO_REG(ha, io_base_addr, 0x7670);
13688 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13690 /* XMT4 */
13691 WRT32_IO_REG(ha, io_base_addr, 0x7680);
13692 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
13693 16, 32);
13694 WRT32_IO_REG(ha, io_base_addr, 0x7690);
13695 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13697 /* XMT Common */
13698 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
13699 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
13700 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
13702 /* Receive DMA registers. */
13704 /* RCVThread0 */
13705 WRT32_IO_REG(ha, io_base_addr, 0x7700);
13706 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
13707 ha->iobase + 0xC0, 16, 32);
13708 WRT32_IO_REG(ha, io_base_addr, 0x7710);
13709 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13711 /* RCVThread1 */
13712 WRT32_IO_REG(ha, io_base_addr, 0x7720);
13713 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
13714 ha->iobase + 0xC0, 16, 32);
13715 WRT32_IO_REG(ha, io_base_addr, 0x7730);
13716 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13718 /* RISC registers. */
13720 /* RISC GP */
13721 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
13722 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
13723 16, 32);
13724 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
13725 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13726 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
13727 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13728 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
13729 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13730 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
13731 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13732 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
13733 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13734 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
13735 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13736 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13737 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13739 /* Local memory controller registers. */
13741 /* LMC */
13742 WRT32_IO_REG(ha, io_base_addr, 0x3000);
13743 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
13744 16, 32);
13745 WRT32_IO_REG(ha, io_base_addr, 0x3010);
13746 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13747 WRT32_IO_REG(ha, io_base_addr, 0x3020);
13748 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13749 WRT32_IO_REG(ha, io_base_addr, 0x3030);
13750 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13751 WRT32_IO_REG(ha, io_base_addr, 0x3040);
13752 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13753 WRT32_IO_REG(ha, io_base_addr, 0x3050);
13754 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13755 WRT32_IO_REG(ha, io_base_addr, 0x3060);
13756 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13758 /* Fibre Protocol Module registers. */
13760 /* FPM hardware */
13761 WRT32_IO_REG(ha, io_base_addr, 0x4000);
13762 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
13763 16, 32);
13764 WRT32_IO_REG(ha, io_base_addr, 0x4010);
13765 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13766 WRT32_IO_REG(ha, io_base_addr, 0x4020);
13767 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13768 WRT32_IO_REG(ha, io_base_addr, 0x4030);
13769 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13770 WRT32_IO_REG(ha, io_base_addr, 0x4040);
13771 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13772 WRT32_IO_REG(ha, io_base_addr, 0x4050);
13773 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13774 WRT32_IO_REG(ha, io_base_addr, 0x4060);
13775 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13776 WRT32_IO_REG(ha, io_base_addr, 0x4070);
13777 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13778 WRT32_IO_REG(ha, io_base_addr, 0x4080);
13779 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13780 WRT32_IO_REG(ha, io_base_addr, 0x4090);
13781 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13782 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
13783 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13784 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
13785 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13787 /* Frame Buffer registers. */
13789 /* FB hardware */
13790 WRT32_IO_REG(ha, io_base_addr, 0x6000);
13791 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
13792 16, 32);
13793 WRT32_IO_REG(ha, io_base_addr, 0x6010);
13794 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13795 WRT32_IO_REG(ha, io_base_addr, 0x6020);
13796 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13797 WRT32_IO_REG(ha, io_base_addr, 0x6030);
13798 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13799 WRT32_IO_REG(ha, io_base_addr, 0x6040);
13800 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13801 WRT32_IO_REG(ha, io_base_addr, 0x6100);
13802 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13803 WRT32_IO_REG(ha, io_base_addr, 0x6130);
13804 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13805 WRT32_IO_REG(ha, io_base_addr, 0x6150);
13806 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13807 WRT32_IO_REG(ha, io_base_addr, 0x6170);
13808 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13809 WRT32_IO_REG(ha, io_base_addr, 0x6190);
13810 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13811 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
13812 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13815 /* Get the request queue */
13816 if (rval == QL_SUCCESS) {
13817 uint32_t cnt;
13818 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
13820 /* Sync DMA buffer. */
13821 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13822 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
13823 DDI_DMA_SYNC_FORKERNEL);
/*
 * NOTE(review): LITTLE_ENDIAN_32 presumably byte-swaps in place
 * on big-endian hosts — confirm against its definition in the
 * driver headers.
 */
13825 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
13826 fw->req_q[cnt] = *w32++;
13827 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
13831 /* Get the response queue */
13832 if (rval == QL_SUCCESS) {
13833 uint32_t cnt;
13834 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
13836 /* Sync DMA buffer. */
13837 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
13838 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
13839 DDI_DMA_SYNC_FORKERNEL);
13841 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
13842 fw->rsp_q[cnt] = *w32++;
13843 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
13847 /* Reset RISC. */
13848 ql_reset_chip(ha);
13850 /* Memory. */
13851 if (rval == QL_SUCCESS) {
13852 /* Code RAM. */
13853 rval = ql_read_risc_ram(ha, 0x20000,
13854 sizeof (fw->code_ram) / 4, fw->code_ram);
13856 if (rval == QL_SUCCESS) {
13857 /* External Memory. */
13858 rval = ql_read_risc_ram(ha, 0x100000,
13859 ha->fw_ext_memory_size / 4, fw->ext_mem);
13862 /* Get the extended trace buffer */
13863 if (rval == QL_SUCCESS) {
13864 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
13865 (ha->fwexttracebuf.bp != NULL)) {
13866 uint32_t cnt;
13867 uint32_t *w32 = ha->fwexttracebuf.bp;
13869 /* Sync DMA buffer. */
13870 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
13871 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
13873 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
13874 fw->ext_trace_buf[cnt] = *w32++;
13879 /* Get the FC event trace buffer */
13880 if (rval == QL_SUCCESS) {
13881 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
13882 (ha->fwfcetracebuf.bp != NULL)) {
13883 uint32_t cnt;
13884 uint32_t *w32 = ha->fwfcetracebuf.bp;
13886 /* Sync DMA buffer. */
13887 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
13888 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
13890 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
13891 fw->fce_trace_buf[cnt] = *w32++;
13896 if (rval != QL_SUCCESS) {
13897 EL(ha, "failed=%xh\n", rval);
13898 } else {
13899 /*EMPTY*/
13900 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
13903 return (rval);
13907 * ql_25xx_binary_fw_dump
13909 * Input:
13910 * ha: adapter state pointer.
13911 * fw: firmware dump context pointer.
13913 * Returns:
13914 * ql local function return status code.
13916 * Context:
13917 * Interrupt or Kernel context, no mailbox commands allowed.
13919 static int
13920 ql_25xx_binary_fw_dump(ql_adapter_state_t *ha, ql_25xx_fw_dump_t *fw)
13922 uint32_t *reg32;
13923 void *bp;
13924 clock_t timer;
13925 int rval = QL_SUCCESS;
13927 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
13929 fw->r2h_status = RD32_IO_REG(ha, risc2host);
13931 /* Pause RISC. */
13932 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
13933 /* Disable ISP interrupts. */
13934 WRT16_IO_REG(ha, ictrl, 0);
13936 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
13937 for (timer = 30000;
13938 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
13939 rval == QL_SUCCESS; timer--) {
13940 if (timer) {
13941 drv_usecwait(100);
13942 if (timer % 10000 == 0) {
13943 EL(ha, "risc pause %d\n", timer);
13945 } else {
13946 EL(ha, "risc pause timeout\n");
13947 rval = QL_FUNCTION_TIMEOUT;
13952 if (rval == QL_SUCCESS) {
13954 /* Host Interface registers */
13956 /* HostRisc registers. */
13957 WRT32_IO_REG(ha, io_base_addr, 0x7000);
13958 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
13959 16, 32);
13960 WRT32_IO_REG(ha, io_base_addr, 0x7010);
13961 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
13963 /* PCIe registers. */
13964 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
13965 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
13966 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
13967 3, 32);
13968 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
13969 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
13971 /* Host interface registers. */
13972 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
13973 sizeof (fw->host_reg) / 4, 32);
13975 /* Disable ISP interrupts. */
13977 WRT32_IO_REG(ha, ictrl, 0);
13978 RD32_IO_REG(ha, ictrl);
13979 ADAPTER_STATE_LOCK(ha);
13980 ha->flags &= ~INTERRUPTS_ENABLED;
13981 ADAPTER_STATE_UNLOCK(ha);
13983 /* Shadow registers. */
13985 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
13986 RD32_IO_REG(ha, io_base_addr);
13988 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13989 WRT_REG_DWORD(ha, reg32, 0xB0000000);
13990 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13991 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
13993 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13994 WRT_REG_DWORD(ha, reg32, 0xB0100000);
13995 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
13996 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
13998 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
13999 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14000 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14001 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14003 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14004 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14005 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14006 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14008 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14009 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14010 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14011 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14013 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14014 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14015 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14016 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14018 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14019 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14020 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14021 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14023 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14024 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14025 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14026 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14028 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14029 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14030 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14031 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14033 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14034 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14035 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14036 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14038 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14039 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14040 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14041 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14043 /* RISC I/O register. */
14045 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14046 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14047 1, 32);
14049 /* Mailbox registers. */
14051 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14052 sizeof (fw->mailbox_reg) / 2, 16);
14054 /* Transfer sequence registers. */
14056 /* XSEQ GP */
14057 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14058 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14059 16, 32);
14060 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14061 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14062 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14063 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14064 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14065 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14066 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14067 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14068 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14069 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14070 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14071 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14072 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14073 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14075 /* XSEQ-0 */
14076 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14077 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14078 16, 32);
14079 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14080 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14081 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14082 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14084 /* XSEQ-1 */
14085 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14086 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14087 16, 32);
14089 /* Receive sequence registers. */
14091 /* RSEQ GP */
14092 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14093 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14094 16, 32);
14095 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14096 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14097 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14098 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14099 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14100 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14101 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14102 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14103 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14104 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14105 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14106 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14107 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14108 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14110 /* RSEQ-0 */
14111 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14112 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14113 16, 32);
14114 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14115 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14117 /* RSEQ-1 */
14118 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14119 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14120 sizeof (fw->rseq_1_reg) / 4, 32);
14122 /* RSEQ-2 */
14123 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14124 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14125 sizeof (fw->rseq_2_reg) / 4, 32);
14127 /* Auxiliary sequencer registers. */
14129 /* ASEQ GP */
14130 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14131 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14132 16, 32);
14133 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14134 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14135 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14136 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14137 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14138 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14139 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14140 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14141 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14142 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14143 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14144 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14145 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14146 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14148 /* ASEQ-0 */
14149 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14150 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14151 16, 32);
14152 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14153 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14155 /* ASEQ-1 */
14156 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14157 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14158 16, 32);
14160 /* ASEQ-2 */
14161 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14162 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14163 16, 32);
14165 /* Command DMA registers. */
14167 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14168 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14169 sizeof (fw->cmd_dma_reg) / 4, 32);
14171 /* Queues. */
14173 /* RequestQ0 */
14174 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14175 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14176 8, 32);
14177 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14179 /* ResponseQ0 */
14180 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14181 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14182 8, 32);
14183 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14185 /* RequestQ1 */
14186 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14187 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14188 8, 32);
14189 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14191 /* Transmit DMA registers. */
14193 /* XMT0 */
14194 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14195 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14196 16, 32);
14197 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14198 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14200 /* XMT1 */
14201 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14202 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14203 16, 32);
14204 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14205 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14207 /* XMT2 */
14208 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14209 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14210 16, 32);
14211 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14212 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14214 /* XMT3 */
14215 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14216 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14217 16, 32);
14218 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14219 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14221 /* XMT4 */
14222 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14223 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14224 16, 32);
14225 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14226 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14228 /* XMT Common */
14229 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14230 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14231 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14233 /* Receive DMA registers. */
14235 /* RCVThread0 */
14236 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14237 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14238 ha->iobase + 0xC0, 16, 32);
14239 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14240 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14242 /* RCVThread1 */
14243 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14244 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14245 ha->iobase + 0xC0, 16, 32);
14246 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14247 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14249 /* RISC registers. */
14251 /* RISC GP */
14252 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14253 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14254 16, 32);
14255 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14256 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14257 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14258 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14259 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14260 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14261 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14262 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14263 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14264 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14265 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14266 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14267 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14268 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14270 /* Local memory controller (LMC) registers. */
14272 /* LMC */
14273 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14274 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14275 16, 32);
14276 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14277 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14278 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14279 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14280 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14281 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14282 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14283 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14284 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14285 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14286 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14287 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14288 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14289 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14291 /* Fibre Protocol Module registers. */
14293 /* FPM hardware */
14294 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14295 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14296 16, 32);
14297 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14298 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14299 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14300 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14301 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14302 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14303 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14304 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14305 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14306 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14307 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14308 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14309 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14310 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14311 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14312 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14313 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14314 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14315 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14316 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14317 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14318 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14320 /* Frame Buffer registers. */
14322 /* FB hardware */
14323 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14324 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14325 16, 32);
14326 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14327 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14328 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14329 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14330 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14331 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14332 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14333 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14334 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14335 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14336 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14337 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14338 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14339 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14340 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14341 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14342 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14343 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14344 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14345 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14346 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14347 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14350 /* Get the request queue */
14351 if (rval == QL_SUCCESS) {
14352 uint32_t cnt;
14353 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14355 /* Sync DMA buffer. */
14356 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14357 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14358 DDI_DMA_SYNC_FORKERNEL);
14360 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14361 fw->req_q[cnt] = *w32++;
14362 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14366 /* Get the response queue */
14367 if (rval == QL_SUCCESS) {
14368 uint32_t cnt;
14369 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14371 /* Sync DMA buffer. */
14372 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14373 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14374 DDI_DMA_SYNC_FORKERNEL);
14376 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14377 fw->rsp_q[cnt] = *w32++;
14378 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14382 /* Reset RISC. */
14384 ql_reset_chip(ha);
14386 /* Memory. */
14388 if (rval == QL_SUCCESS) {
14389 /* Code RAM. */
14390 rval = ql_read_risc_ram(ha, 0x20000,
14391 sizeof (fw->code_ram) / 4, fw->code_ram);
14393 if (rval == QL_SUCCESS) {
14394 /* External Memory. */
14395 rval = ql_read_risc_ram(ha, 0x100000,
14396 ha->fw_ext_memory_size / 4, fw->ext_mem);
14399 /* Get the FC event trace buffer */
14400 if (rval == QL_SUCCESS) {
14401 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14402 (ha->fwfcetracebuf.bp != NULL)) {
14403 uint32_t cnt;
14404 uint32_t *w32 = ha->fwfcetracebuf.bp;
14406 /* Sync DMA buffer. */
14407 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14408 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14410 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14411 fw->fce_trace_buf[cnt] = *w32++;
14416 /* Get the extended trace buffer */
14417 if (rval == QL_SUCCESS) {
14418 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14419 (ha->fwexttracebuf.bp != NULL)) {
14420 uint32_t cnt;
14421 uint32_t *w32 = ha->fwexttracebuf.bp;
14423 /* Sync DMA buffer. */
14424 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14425 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14427 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14428 fw->ext_trace_buf[cnt] = *w32++;
14433 if (rval != QL_SUCCESS) {
14434 EL(ha, "failed=%xh\n", rval);
14435 } else {
14436 /*EMPTY*/
14437 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14440 return (rval);
14444 * ql_81xx_binary_fw_dump
14446 * Input:
14447 * ha: adapter state pointer.
14448 * fw: firmware dump context pointer.
14450 * Returns:
14451 * ql local function return status code.
14453 * Context:
14454 * Interrupt or Kernel context, no mailbox commands allowed.
14456 static int
14457 ql_81xx_binary_fw_dump(ql_adapter_state_t *ha, ql_81xx_fw_dump_t *fw)
14459 uint32_t *reg32;
14460 void *bp;
14461 clock_t timer;
14462 int rval = QL_SUCCESS;
14464 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
14466 fw->r2h_status = RD32_IO_REG(ha, risc2host);
14468 /* Pause RISC. */
14469 if ((RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0) {
14470 /* Disable ISP interrupts. */
14471 WRT16_IO_REG(ha, ictrl, 0);
14473 WRT32_IO_REG(ha, hccr, HC24_PAUSE_RISC);
14474 for (timer = 30000;
14475 (RD32_IO_REG(ha, risc2host) & RH_RISC_PAUSED) == 0 &&
14476 rval == QL_SUCCESS; timer--) {
14477 if (timer) {
14478 drv_usecwait(100);
14479 if (timer % 10000 == 0) {
14480 EL(ha, "risc pause %d\n", timer);
14482 } else {
14483 EL(ha, "risc pause timeout\n");
14484 rval = QL_FUNCTION_TIMEOUT;
14489 if (rval == QL_SUCCESS) {
14491 /* Host Interface registers */
14493 /* HostRisc registers. */
14494 WRT32_IO_REG(ha, io_base_addr, 0x7000);
14495 bp = ql_read_regs(ha, fw->hostrisc_reg, ha->iobase + 0xC0,
14496 16, 32);
14497 WRT32_IO_REG(ha, io_base_addr, 0x7010);
14498 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14500 /* PCIe registers. */
14501 WRT32_IO_REG(ha, io_base_addr, 0x7c00);
14502 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x1);
14503 bp = ql_read_regs(ha, fw->pcie_reg, ha->iobase + 0xC4,
14504 3, 32);
14505 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 1, 32);
14506 WRT_REG_DWORD(ha, ha->iobase + 0xc0, 0x0);
14508 /* Host interface registers. */
14509 (void) ql_read_regs(ha, fw->host_reg, ha->iobase,
14510 sizeof (fw->host_reg) / 4, 32);
14512 /* Disable ISP interrupts. */
14514 WRT32_IO_REG(ha, ictrl, 0);
14515 RD32_IO_REG(ha, ictrl);
14516 ADAPTER_STATE_LOCK(ha);
14517 ha->flags &= ~INTERRUPTS_ENABLED;
14518 ADAPTER_STATE_UNLOCK(ha);
14520 /* Shadow registers. */
14522 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14523 RD32_IO_REG(ha, io_base_addr);
14525 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14526 WRT_REG_DWORD(ha, reg32, 0xB0000000);
14527 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14528 fw->shadow_reg[0] = RD_REG_DWORD(ha, reg32);
14530 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14531 WRT_REG_DWORD(ha, reg32, 0xB0100000);
14532 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14533 fw->shadow_reg[1] = RD_REG_DWORD(ha, reg32);
14535 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14536 WRT_REG_DWORD(ha, reg32, 0xB0200000);
14537 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14538 fw->shadow_reg[2] = RD_REG_DWORD(ha, reg32);
14540 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14541 WRT_REG_DWORD(ha, reg32, 0xB0300000);
14542 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14543 fw->shadow_reg[3] = RD_REG_DWORD(ha, reg32);
14545 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14546 WRT_REG_DWORD(ha, reg32, 0xB0400000);
14547 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14548 fw->shadow_reg[4] = RD_REG_DWORD(ha, reg32);
14550 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14551 WRT_REG_DWORD(ha, reg32, 0xB0500000);
14552 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14553 fw->shadow_reg[5] = RD_REG_DWORD(ha, reg32);
14555 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14556 WRT_REG_DWORD(ha, reg32, 0xB0600000);
14557 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14558 fw->shadow_reg[6] = RD_REG_DWORD(ha, reg32);
14560 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14561 WRT_REG_DWORD(ha, reg32, 0xB0700000);
14562 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14563 fw->shadow_reg[7] = RD_REG_DWORD(ha, reg32);
14565 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14566 WRT_REG_DWORD(ha, reg32, 0xB0800000);
14567 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14568 fw->shadow_reg[8] = RD_REG_DWORD(ha, reg32);
14570 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14571 WRT_REG_DWORD(ha, reg32, 0xB0900000);
14572 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14573 fw->shadow_reg[9] = RD_REG_DWORD(ha, reg32);
14575 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xF0);
14576 WRT_REG_DWORD(ha, reg32, 0xB0A00000);
14577 reg32 = (uint32_t *)((caddr_t)ha->iobase + 0xFC);
14578 fw->shadow_reg[0xa] = RD_REG_DWORD(ha, reg32);
14580 /* RISC I/O register. */
14582 WRT32_IO_REG(ha, io_base_addr, 0x0010);
14583 (void) ql_read_regs(ha, &fw->risc_io, ha->iobase + 0xC0,
14584 1, 32);
14586 /* Mailbox registers. */
14588 (void) ql_read_regs(ha, fw->mailbox_reg, ha->iobase + 0x80,
14589 sizeof (fw->mailbox_reg) / 2, 16);
14591 /* Transfer sequence registers. */
14593 /* XSEQ GP */
14594 WRT32_IO_REG(ha, io_base_addr, 0xBF00);
14595 bp = ql_read_regs(ha, fw->xseq_gp_reg, ha->iobase + 0xC0,
14596 16, 32);
14597 WRT32_IO_REG(ha, io_base_addr, 0xBF10);
14598 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14599 WRT32_IO_REG(ha, io_base_addr, 0xBF20);
14600 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14601 WRT32_IO_REG(ha, io_base_addr, 0xBF30);
14602 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14603 WRT32_IO_REG(ha, io_base_addr, 0xBF40);
14604 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14605 WRT32_IO_REG(ha, io_base_addr, 0xBF50);
14606 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14607 WRT32_IO_REG(ha, io_base_addr, 0xBF60);
14608 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14609 WRT32_IO_REG(ha, io_base_addr, 0xBF70);
14610 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14612 /* XSEQ-0 */
14613 WRT32_IO_REG(ha, io_base_addr, 0xBFC0);
14614 bp = ql_read_regs(ha, fw->xseq_0_reg, ha->iobase + 0xC0,
14615 16, 32);
14616 WRT32_IO_REG(ha, io_base_addr, 0xBFD0);
14617 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14618 WRT32_IO_REG(ha, io_base_addr, 0xBFE0);
14619 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14621 /* XSEQ-1 */
14622 WRT32_IO_REG(ha, io_base_addr, 0xBFF0);
14623 (void) ql_read_regs(ha, fw->xseq_1_reg, ha->iobase + 0xC0,
14624 16, 32);
14626 /* Receive sequence registers. */
14628 /* RSEQ GP */
14629 WRT32_IO_REG(ha, io_base_addr, 0xFF00);
14630 bp = ql_read_regs(ha, fw->rseq_gp_reg, ha->iobase + 0xC0,
14631 16, 32);
14632 WRT32_IO_REG(ha, io_base_addr, 0xFF10);
14633 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14634 WRT32_IO_REG(ha, io_base_addr, 0xFF20);
14635 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14636 WRT32_IO_REG(ha, io_base_addr, 0xFF30);
14637 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14638 WRT32_IO_REG(ha, io_base_addr, 0xFF40);
14639 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14640 WRT32_IO_REG(ha, io_base_addr, 0xFF50);
14641 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14642 WRT32_IO_REG(ha, io_base_addr, 0xFF60);
14643 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14644 WRT32_IO_REG(ha, io_base_addr, 0xFF70);
14645 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14647 /* RSEQ-0 */
14648 WRT32_IO_REG(ha, io_base_addr, 0xFFC0);
14649 bp = ql_read_regs(ha, fw->rseq_0_reg, ha->iobase + 0xC0,
14650 16, 32);
14651 WRT32_IO_REG(ha, io_base_addr, 0xFFD0);
14652 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14654 /* RSEQ-1 */
14655 WRT32_IO_REG(ha, io_base_addr, 0xFFE0);
14656 (void) ql_read_regs(ha, fw->rseq_1_reg, ha->iobase + 0xC0,
14657 sizeof (fw->rseq_1_reg) / 4, 32);
14659 /* RSEQ-2 */
14660 WRT32_IO_REG(ha, io_base_addr, 0xFFF0);
14661 (void) ql_read_regs(ha, fw->rseq_2_reg, ha->iobase + 0xC0,
14662 sizeof (fw->rseq_2_reg) / 4, 32);
14664 /* Auxiliary sequencer registers. */
14666 /* ASEQ GP */
14667 WRT32_IO_REG(ha, io_base_addr, 0xB000);
14668 bp = ql_read_regs(ha, fw->aseq_gp_reg, ha->iobase + 0xC0,
14669 16, 32);
14670 WRT32_IO_REG(ha, io_base_addr, 0xB010);
14671 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14672 WRT32_IO_REG(ha, io_base_addr, 0xB020);
14673 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14674 WRT32_IO_REG(ha, io_base_addr, 0xB030);
14675 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14676 WRT32_IO_REG(ha, io_base_addr, 0xB040);
14677 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14678 WRT32_IO_REG(ha, io_base_addr, 0xB050);
14679 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14680 WRT32_IO_REG(ha, io_base_addr, 0xB060);
14681 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14682 WRT32_IO_REG(ha, io_base_addr, 0xB070);
14683 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14685 /* ASEQ-0 */
14686 WRT32_IO_REG(ha, io_base_addr, 0xB0C0);
14687 bp = ql_read_regs(ha, fw->aseq_0_reg, ha->iobase + 0xC0,
14688 16, 32);
14689 WRT32_IO_REG(ha, io_base_addr, 0xB0D0);
14690 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14692 /* ASEQ-1 */
14693 WRT32_IO_REG(ha, io_base_addr, 0xB0E0);
14694 (void) ql_read_regs(ha, fw->aseq_1_reg, ha->iobase + 0xC0,
14695 16, 32);
14697 /* ASEQ-2 */
14698 WRT32_IO_REG(ha, io_base_addr, 0xB0F0);
14699 (void) ql_read_regs(ha, fw->aseq_2_reg, ha->iobase + 0xC0,
14700 16, 32);
14702 /* Command DMA registers. */
14704 WRT32_IO_REG(ha, io_base_addr, 0x7100);
14705 (void) ql_read_regs(ha, fw->cmd_dma_reg, ha->iobase + 0xC0,
14706 sizeof (fw->cmd_dma_reg) / 4, 32);
14708 /* Queues. */
14710 /* RequestQ0 */
14711 WRT32_IO_REG(ha, io_base_addr, 0x7200);
14712 bp = ql_read_regs(ha, fw->req0_dma_reg, ha->iobase + 0xC0,
14713 8, 32);
14714 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14716 /* ResponseQ0 */
14717 WRT32_IO_REG(ha, io_base_addr, 0x7300);
14718 bp = ql_read_regs(ha, fw->resp0_dma_reg, ha->iobase + 0xC0,
14719 8, 32);
14720 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14722 /* RequestQ1 */
14723 WRT32_IO_REG(ha, io_base_addr, 0x7400);
14724 bp = ql_read_regs(ha, fw->req1_dma_reg, ha->iobase + 0xC0,
14725 8, 32);
14726 (void) ql_read_regs(ha, bp, ha->iobase + 0xE4, 7, 32);
14728 /* Transmit DMA registers. */
14730 /* XMT0 */
14731 WRT32_IO_REG(ha, io_base_addr, 0x7600);
14732 bp = ql_read_regs(ha, fw->xmt0_dma_reg, ha->iobase + 0xC0,
14733 16, 32);
14734 WRT32_IO_REG(ha, io_base_addr, 0x7610);
14735 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14737 /* XMT1 */
14738 WRT32_IO_REG(ha, io_base_addr, 0x7620);
14739 bp = ql_read_regs(ha, fw->xmt1_dma_reg, ha->iobase + 0xC0,
14740 16, 32);
14741 WRT32_IO_REG(ha, io_base_addr, 0x7630);
14742 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14744 /* XMT2 */
14745 WRT32_IO_REG(ha, io_base_addr, 0x7640);
14746 bp = ql_read_regs(ha, fw->xmt2_dma_reg, ha->iobase + 0xC0,
14747 16, 32);
14748 WRT32_IO_REG(ha, io_base_addr, 0x7650);
14749 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14751 /* XMT3 */
14752 WRT32_IO_REG(ha, io_base_addr, 0x7660);
14753 bp = ql_read_regs(ha, fw->xmt3_dma_reg, ha->iobase + 0xC0,
14754 16, 32);
14755 WRT32_IO_REG(ha, io_base_addr, 0x7670);
14756 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14758 /* XMT4 */
14759 WRT32_IO_REG(ha, io_base_addr, 0x7680);
14760 bp = ql_read_regs(ha, fw->xmt4_dma_reg, ha->iobase + 0xC0,
14761 16, 32);
14762 WRT32_IO_REG(ha, io_base_addr, 0x7690);
14763 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14765 /* XMT Common */
14766 WRT32_IO_REG(ha, io_base_addr, 0x76A0);
14767 (void) ql_read_regs(ha, fw->xmt_data_dma_reg,
14768 ha->iobase + 0xC0, sizeof (fw->xmt_data_dma_reg) / 4, 32);
14770 /* Receive DMA registers. */
14772 /* RCVThread0 */
14773 WRT32_IO_REG(ha, io_base_addr, 0x7700);
14774 bp = ql_read_regs(ha, fw->rcvt0_data_dma_reg,
14775 ha->iobase + 0xC0, 16, 32);
14776 WRT32_IO_REG(ha, io_base_addr, 0x7710);
14777 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14779 /* RCVThread1 */
14780 WRT32_IO_REG(ha, io_base_addr, 0x7720);
14781 bp = ql_read_regs(ha, fw->rcvt1_data_dma_reg,
14782 ha->iobase + 0xC0, 16, 32);
14783 WRT32_IO_REG(ha, io_base_addr, 0x7730);
14784 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14786 /* RISC registers. */
14788 /* RISC GP */
14789 WRT32_IO_REG(ha, io_base_addr, 0x0F00);
14790 bp = ql_read_regs(ha, fw->risc_gp_reg, ha->iobase + 0xC0,
14791 16, 32);
14792 WRT32_IO_REG(ha, io_base_addr, 0x0F10);
14793 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14794 WRT32_IO_REG(ha, io_base_addr, 0x0F20);
14795 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14796 WRT32_IO_REG(ha, io_base_addr, 0x0F30);
14797 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14798 WRT32_IO_REG(ha, io_base_addr, 0x0F40);
14799 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14800 WRT32_IO_REG(ha, io_base_addr, 0x0F50);
14801 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14802 WRT32_IO_REG(ha, io_base_addr, 0x0F60);
14803 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14804 WRT32_IO_REG(ha, io_base_addr, 0x0F70);
14805 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14807 /* Local memory controller (LMC) registers. */
14809 /* LMC */
14810 WRT32_IO_REG(ha, io_base_addr, 0x3000);
14811 bp = ql_read_regs(ha, fw->lmc_reg, ha->iobase + 0xC0,
14812 16, 32);
14813 WRT32_IO_REG(ha, io_base_addr, 0x3010);
14814 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14815 WRT32_IO_REG(ha, io_base_addr, 0x3020);
14816 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14817 WRT32_IO_REG(ha, io_base_addr, 0x3030);
14818 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14819 WRT32_IO_REG(ha, io_base_addr, 0x3040);
14820 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14821 WRT32_IO_REG(ha, io_base_addr, 0x3050);
14822 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14823 WRT32_IO_REG(ha, io_base_addr, 0x3060);
14824 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14825 WRT32_IO_REG(ha, io_base_addr, 0x3070);
14826 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14828 /* Fibre Protocol Module registers. */
14830 /* FPM hardware */
14831 WRT32_IO_REG(ha, io_base_addr, 0x4000);
14832 bp = ql_read_regs(ha, fw->fpm_hdw_reg, ha->iobase + 0xC0,
14833 16, 32);
14834 WRT32_IO_REG(ha, io_base_addr, 0x4010);
14835 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14836 WRT32_IO_REG(ha, io_base_addr, 0x4020);
14837 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14838 WRT32_IO_REG(ha, io_base_addr, 0x4030);
14839 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14840 WRT32_IO_REG(ha, io_base_addr, 0x4040);
14841 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14842 WRT32_IO_REG(ha, io_base_addr, 0x4050);
14843 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14844 WRT32_IO_REG(ha, io_base_addr, 0x4060);
14845 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14846 WRT32_IO_REG(ha, io_base_addr, 0x4070);
14847 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14848 WRT32_IO_REG(ha, io_base_addr, 0x4080);
14849 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14850 WRT32_IO_REG(ha, io_base_addr, 0x4090);
14851 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14852 WRT32_IO_REG(ha, io_base_addr, 0x40A0);
14853 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14854 WRT32_IO_REG(ha, io_base_addr, 0x40B0);
14855 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14856 WRT32_IO_REG(ha, io_base_addr, 0x40C0);
14857 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14858 WRT32_IO_REG(ha, io_base_addr, 0x40D0);
14859 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14861 /* Frame Buffer registers. */
14863 /* FB hardware */
14864 WRT32_IO_REG(ha, io_base_addr, 0x6000);
14865 bp = ql_read_regs(ha, fw->fb_hdw_reg, ha->iobase + 0xC0,
14866 16, 32);
14867 WRT32_IO_REG(ha, io_base_addr, 0x6010);
14868 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14869 WRT32_IO_REG(ha, io_base_addr, 0x6020);
14870 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14871 WRT32_IO_REG(ha, io_base_addr, 0x6030);
14872 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14873 WRT32_IO_REG(ha, io_base_addr, 0x6040);
14874 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14875 WRT32_IO_REG(ha, io_base_addr, 0x6100);
14876 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14877 WRT32_IO_REG(ha, io_base_addr, 0x6130);
14878 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14879 WRT32_IO_REG(ha, io_base_addr, 0x6150);
14880 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14881 WRT32_IO_REG(ha, io_base_addr, 0x6170);
14882 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14883 WRT32_IO_REG(ha, io_base_addr, 0x6190);
14884 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14885 WRT32_IO_REG(ha, io_base_addr, 0x61B0);
14886 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14887 WRT32_IO_REG(ha, io_base_addr, 0x61C0);
14888 bp = ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14889 WRT32_IO_REG(ha, io_base_addr, 0x6F00);
14890 (void) ql_read_regs(ha, bp, ha->iobase + 0xC0, 16, 32);
14893 /* Get the request queue */
14894 if (rval == QL_SUCCESS) {
14895 uint32_t cnt;
14896 uint32_t *w32 = (uint32_t *)ha->request_ring_bp;
14898 /* Sync DMA buffer. */
14899 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14900 REQUEST_Q_BUFFER_OFFSET, sizeof (fw->req_q),
14901 DDI_DMA_SYNC_FORKERNEL);
14903 for (cnt = 0; cnt < sizeof (fw->req_q) / 4; cnt++) {
14904 fw->req_q[cnt] = *w32++;
14905 LITTLE_ENDIAN_32(&fw->req_q[cnt]);
14909 /* Get the response queue */
14910 if (rval == QL_SUCCESS) {
14911 uint32_t cnt;
14912 uint32_t *w32 = (uint32_t *)ha->response_ring_bp;
14914 /* Sync DMA buffer. */
14915 (void) ddi_dma_sync(ha->hba_buf.dma_handle,
14916 RESPONSE_Q_BUFFER_OFFSET, sizeof (fw->rsp_q),
14917 DDI_DMA_SYNC_FORKERNEL);
14919 for (cnt = 0; cnt < sizeof (fw->rsp_q) / 4; cnt++) {
14920 fw->rsp_q[cnt] = *w32++;
14921 LITTLE_ENDIAN_32(&fw->rsp_q[cnt]);
14925 /* Reset RISC. */
14927 ql_reset_chip(ha);
14929 /* Memory. */
14931 if (rval == QL_SUCCESS) {
14932 /* Code RAM. */
14933 rval = ql_read_risc_ram(ha, 0x20000,
14934 sizeof (fw->code_ram) / 4, fw->code_ram);
14936 if (rval == QL_SUCCESS) {
14937 /* External Memory. */
14938 rval = ql_read_risc_ram(ha, 0x100000,
14939 ha->fw_ext_memory_size / 4, fw->ext_mem);
14942 /* Get the FC event trace buffer */
14943 if (rval == QL_SUCCESS) {
14944 if (CFG_IST(ha, CFG_ENABLE_FWFCETRACE) &&
14945 (ha->fwfcetracebuf.bp != NULL)) {
14946 uint32_t cnt;
14947 uint32_t *w32 = ha->fwfcetracebuf.bp;
14949 /* Sync DMA buffer. */
14950 (void) ddi_dma_sync(ha->fwfcetracebuf.dma_handle, 0,
14951 FWFCESIZE, DDI_DMA_SYNC_FORKERNEL);
14953 for (cnt = 0; cnt < FWFCESIZE / 4; cnt++) {
14954 fw->fce_trace_buf[cnt] = *w32++;
14959 /* Get the extended trace buffer */
14960 if (rval == QL_SUCCESS) {
14961 if (CFG_IST(ha, CFG_ENABLE_FWEXTTRACE) &&
14962 (ha->fwexttracebuf.bp != NULL)) {
14963 uint32_t cnt;
14964 uint32_t *w32 = ha->fwexttracebuf.bp;
14966 /* Sync DMA buffer. */
14967 (void) ddi_dma_sync(ha->fwexttracebuf.dma_handle, 0,
14968 FWEXTSIZE, DDI_DMA_SYNC_FORKERNEL);
14970 for (cnt = 0; cnt < FWEXTSIZE / 4; cnt++) {
14971 fw->ext_trace_buf[cnt] = *w32++;
14976 if (rval != QL_SUCCESS) {
14977 EL(ha, "failed=%xh\n", rval);
14978 } else {
14979 /*EMPTY*/
14980 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
14983 return (rval);
14987 * ql_read_risc_ram
14988 * Reads RISC RAM one word at a time.
14989 * Risc interrupts must be disabled when this routine is called.
14991 * Input:
14992 * ha: adapter state pointer.
14993 * risc_address: RISC code start address.
14994 * len: Number of words.
14995 * buf: buffer pointer.
14997 * Returns:
14998 * ql local function return status code.
15000 * Context:
15001 * Interrupt or Kernel context, no mailbox commands allowed.
static int
ql_read_risc_ram(ql_adapter_state_t *ha, uint32_t risc_address, uint32_t len,
    void *buf)
{
	uint32_t	cnt;
	uint16_t	stat;
	clock_t		timer;
	uint16_t	*buf16 = (uint16_t *)buf;
	uint32_t	*buf32 = (uint32_t *)buf;
	int		rval = QL_SUCCESS;

	/*
	 * One READ_RAM_EXTENDED mailbox command per word; the completion
	 * is polled directly (no ISR) since interrupts are disabled.
	 */
	for (cnt = 0; cnt < len; cnt++, risc_address++) {
		WRT16_IO_REG(ha, mailbox_in[0], MBC_READ_RAM_EXTENDED);
		WRT16_IO_REG(ha, mailbox_in[1], LSW(risc_address));
		WRT16_IO_REG(ha, mailbox_in[8], MSW(risc_address));
		/* Ring the doorbell appropriate to the chip family. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			WRT32_IO_REG(ha, nx_host_int, NX_MBX_CMD);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_SET_HOST_INT);
		} else {
			WRT16_IO_REG(ha, hccr, HC_SET_HOST_INT);
		}
		/* Poll up to 6000000 * 5us (~30s) for mailbox completion. */
		for (timer = 6000000; timer && rval == QL_SUCCESS; timer--) {
			if (INTERRUPT_PENDING(ha)) {
				stat = (uint16_t)
				    (RD16_IO_REG(ha, risc2host) & 0xff);
				/*
				 * NOTE(review): 1/0x10 treated as command
				 * complete, 2/0x11 as command error —
				 * confirm against the ISR status decoding.
				 */
				if ((stat == 1) || (stat == 0x10)) {
					/* 24xx+ returns 32 bits in mb2/mb3. */
					if (CFG_IST(ha, CFG_CTRL_24258081)) {
						buf32[cnt] = SHORT_TO_LONG(
						    RD16_IO_REG(ha,
						    mailbox_out[2]),
						    RD16_IO_REG(ha,
						    mailbox_out[3]));
					} else {
						buf16[cnt] =
						    RD16_IO_REG(ha,
						    mailbox_out[2]);
					}

					break;
				} else if ((stat == 2) || (stat == 0x11)) {
					/* Return the mailbox status code. */
					rval = RD16_IO_REG(ha, mailbox_out[0]);
					break;
				}
				/* Spurious interrupt; clear and keep polling */
				if (CFG_IST(ha, CFG_CTRL_8021)) {
					ql_8021_clr_hw_intr(ha);
					ql_8021_clr_fw_intr(ha);
				} else if (CFG_IST(ha, CFG_CTRL_242581)) {
					WRT32_IO_REG(ha, hccr,
					    HC24_CLR_RISC_INT);
					RD32_IO_REG(ha, hccr);
				} else {
					WRT16_IO_REG(ha, hccr,
					    HC_CLR_RISC_INT);
				}
			}
			drv_usecwait(5);
		}
		/* Acknowledge/clear the completion interrupt. */
		if (CFG_IST(ha, CFG_CTRL_8021)) {
			ql_8021_clr_hw_intr(ha);
			ql_8021_clr_fw_intr(ha);
		} else if (CFG_IST(ha, CFG_CTRL_242581)) {
			WRT32_IO_REG(ha, hccr, HC24_CLR_RISC_INT);
			RD32_IO_REG(ha, hccr);
		} else {
			WRT16_IO_REG(ha, hccr, HC_CLR_RISC_INT);
			WRT16_IO_REG(ha, semaphore, 0);
		}

		if (timer == 0) {
			rval = QL_FUNCTION_TIMEOUT;
		}
	}

	return (rval);
}
15081 * ql_read_regs
15082 * Reads adapter registers to buffer.
15084 * Input:
15085 * ha: adapter state pointer.
15086 * buf: buffer pointer.
15087 * reg: start address.
15088 * count: number of registers.
15089 * wds: register size.
15091 * Context:
15092 * Interrupt or Kernel context, no mailbox commands allowed.
15094 static void *
15095 ql_read_regs(ql_adapter_state_t *ha, void *buf, void *reg, uint32_t count,
15096 uint8_t wds)
15098 uint32_t *bp32, *reg32;
15099 uint16_t *bp16, *reg16;
15100 uint8_t *bp8, *reg8;
15102 switch (wds) {
15103 case 32:
15104 bp32 = buf;
15105 reg32 = reg;
15106 while (count--) {
15107 *bp32++ = RD_REG_DWORD(ha, reg32++);
15109 return (bp32);
15110 case 16:
15111 bp16 = buf;
15112 reg16 = reg;
15113 while (count--) {
15114 *bp16++ = RD_REG_WORD(ha, reg16++);
15116 return (bp16);
15117 case 8:
15118 bp8 = buf;
15119 reg8 = reg;
15120 while (count--) {
15121 *bp8++ = RD_REG_BYTE(ha, reg8++);
15123 return (bp8);
15124 default:
15125 EL(ha, "Unknown word size=%d\n", wds);
15126 return (buf);
15130 static int
15131 ql_save_config_regs(dev_info_t *dip)
15133 ql_adapter_state_t *ha;
15134 int ret;
15135 ql_config_space_t chs;
15136 caddr_t prop = "ql-config-space";
15138 ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
15139 if (ha == NULL) {
15140 QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
15141 ddi_get_instance(dip));
15142 return (DDI_FAILURE);
15145 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15147 /*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
15148 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, prop) ==
15149 1) {
15150 QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
15151 return (DDI_SUCCESS);
15154 chs.chs_command = (uint16_t)ql_pci_config_get16(ha, PCI_CONF_COMM);
15155 chs.chs_header_type = (uint8_t)ql_pci_config_get8(ha,
15156 PCI_CONF_HEADER);
15157 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15158 chs.chs_bridge_control = (uint8_t)ql_pci_config_get8(ha,
15159 PCI_BCNF_BCNTRL);
15162 chs.chs_cache_line_size = (uint8_t)ql_pci_config_get8(ha,
15163 PCI_CONF_CACHE_LINESZ);
15165 chs.chs_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15166 PCI_CONF_LATENCY_TIMER);
15168 if ((chs.chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
15169 chs.chs_sec_latency_timer = (uint8_t)ql_pci_config_get8(ha,
15170 PCI_BCNF_LATENCY_TIMER);
15173 chs.chs_base0 = ql_pci_config_get32(ha, PCI_CONF_BASE0);
15174 chs.chs_base1 = ql_pci_config_get32(ha, PCI_CONF_BASE1);
15175 chs.chs_base2 = ql_pci_config_get32(ha, PCI_CONF_BASE2);
15176 chs.chs_base3 = ql_pci_config_get32(ha, PCI_CONF_BASE3);
15177 chs.chs_base4 = ql_pci_config_get32(ha, PCI_CONF_BASE4);
15178 chs.chs_base5 = ql_pci_config_get32(ha, PCI_CONF_BASE5);
15180 /*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
15181 ret = ndi_prop_update_byte_array(DDI_DEV_T_NONE, dip, prop,
15182 (uchar_t *)&chs, sizeof (ql_config_space_t));
15184 if (ret != DDI_PROP_SUCCESS) {
15185 cmn_err(CE_WARN, "!Qlogic %s(%d) can't update prop %s",
15186 QL_NAME, ddi_get_instance(dip), prop);
15187 return (DDI_FAILURE);
15190 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15192 return (DDI_SUCCESS);
static int
ql_restore_config_regs(dev_info_t *dip)
{
	ql_adapter_state_t	*ha;
	uint_t			elements;
	ql_config_space_t	*chs_p;
	caddr_t			prop = "ql-config-space";

	ha = ddi_get_soft_state(ql_state, ddi_get_instance(dip));
	if (ha == NULL) {
		QL_PRINT_2(CE_CONT, "(%d): no adapter\n",
		    ddi_get_instance(dip));
		return (DDI_FAILURE);
	}

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* No saved copy means nothing to restore. */
	/*LINTED [Solaris DDI_DEV_T_ANY Lint warning]*/
	if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, prop,
	    (uchar_t **)&chs_p, &elements) != DDI_PROP_SUCCESS) {
		QL_PRINT_2(CE_CONT, "(%d): no prop exit\n", ha->instance);
		return (DDI_FAILURE);
	}

	ql_pci_config_put16(ha, PCI_CONF_COMM, chs_p->chs_command);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		/*
		 * NOTE(review): saved with an 8-bit read but restored with
		 * a 16-bit write — confirm PCI_BCNF_BCNTRL width intent.
		 */
		ql_pci_config_put16(ha, PCI_BCNF_BCNTRL,
		    chs_p->chs_bridge_control);
	}

	ql_pci_config_put8(ha, PCI_CONF_CACHE_LINESZ,
	    chs_p->chs_cache_line_size);

	ql_pci_config_put8(ha, PCI_CONF_LATENCY_TIMER,
	    chs_p->chs_latency_timer);

	if ((chs_p->chs_header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		ql_pci_config_put8(ha, PCI_BCNF_LATENCY_TIMER,
		    chs_p->chs_sec_latency_timer);
	}

	/* Base address registers. */
	ql_pci_config_put32(ha, PCI_CONF_BASE0, chs_p->chs_base0);
	ql_pci_config_put32(ha, PCI_CONF_BASE1, chs_p->chs_base1);
	ql_pci_config_put32(ha, PCI_CONF_BASE2, chs_p->chs_base2);
	ql_pci_config_put32(ha, PCI_CONF_BASE3, chs_p->chs_base3);
	ql_pci_config_put32(ha, PCI_CONF_BASE4, chs_p->chs_base4);
	ql_pci_config_put32(ha, PCI_CONF_BASE5, chs_p->chs_base5);

	ddi_prop_free(chs_p);

	/* Remove the saved copy so a future save is permitted. */
	/*LINTED [Solaris DDI_DEV_T_NONE Lint warning]*/
	if (ndi_prop_remove(DDI_DEV_T_NONE, dip, prop) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "!Qlogic %s(%d): can't remove prop %s",
		    QL_NAME, ddi_get_instance(dip), prop);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
15258 uint8_t
15259 ql_pci_config_get8(ql_adapter_state_t *ha, off_t off)
15261 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15262 return (ddi_get8(ha->sbus_config_handle,
15263 (uint8_t *)(ha->sbus_config_base + off)));
15266 #ifdef KERNEL_32
15267 return (pci_config_getb(ha->pci_handle, off));
15268 #else
15269 return (pci_config_get8(ha->pci_handle, off));
15270 #endif
15273 uint16_t
15274 ql_pci_config_get16(ql_adapter_state_t *ha, off_t off)
15276 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15277 return (ddi_get16(ha->sbus_config_handle,
15278 (uint16_t *)(ha->sbus_config_base + off)));
15281 #ifdef KERNEL_32
15282 return (pci_config_getw(ha->pci_handle, off));
15283 #else
15284 return (pci_config_get16(ha->pci_handle, off));
15285 #endif
15288 uint32_t
15289 ql_pci_config_get32(ql_adapter_state_t *ha, off_t off)
15291 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15292 return (ddi_get32(ha->sbus_config_handle,
15293 (uint32_t *)(ha->sbus_config_base + off)));
15296 #ifdef KERNEL_32
15297 return (pci_config_getl(ha->pci_handle, off));
15298 #else
15299 return (pci_config_get32(ha->pci_handle, off));
15300 #endif
15303 void
15304 ql_pci_config_put8(ql_adapter_state_t *ha, off_t off, uint8_t val)
15306 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15307 ddi_put8(ha->sbus_config_handle,
15308 (uint8_t *)(ha->sbus_config_base + off), val);
15309 } else {
15310 #ifdef KERNEL_32
15311 pci_config_putb(ha->pci_handle, off, val);
15312 #else
15313 pci_config_put8(ha->pci_handle, off, val);
15314 #endif
15318 void
15319 ql_pci_config_put16(ql_adapter_state_t *ha, off_t off, uint16_t val)
15321 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15322 ddi_put16(ha->sbus_config_handle,
15323 (uint16_t *)(ha->sbus_config_base + off), val);
15324 } else {
15325 #ifdef KERNEL_32
15326 pci_config_putw(ha->pci_handle, off, val);
15327 #else
15328 pci_config_put16(ha->pci_handle, off, val);
15329 #endif
15333 void
15334 ql_pci_config_put32(ql_adapter_state_t *ha, off_t off, uint32_t val)
15336 if (CFG_IST(ha, CFG_SBUS_CARD)) {
15337 ddi_put32(ha->sbus_config_handle,
15338 (uint32_t *)(ha->sbus_config_base + off), val);
15339 } else {
15340 #ifdef KERNEL_32
15341 pci_config_putl(ha->pci_handle, off, val);
15342 #else
15343 pci_config_put32(ha->pci_handle, off, val);
15344 #endif
/*
 * ql_halt
 *	Waits for commands that are running to finish and
 *	if they do not, commands are aborted.
 *	Finally the adapter is reset.
 *
 * Input:
 *	ha:	adapter state pointer.
 *	pwr:	power state.
 *
 * Context:
 *	Kernel context.
 */
static void
ql_halt(ql_adapter_state_t *ha, int pwr)
{
	uint32_t	cnt;
	ql_tgt_t	*tq;
	ql_srb_t	*sp;
	uint16_t	index;
	ql_link_t	*link;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Wait for all commands running to finish. */
	for (index = 0; index < DEVICE_HEAD_LIST_SIZE; index++) {
		for (link = ha->dev[index].first; link != NULL;
		    link = link->next) {
			tq = link->base_address;
			(void) ql_abort_device(ha, tq, 0);

			/* Wait for 30 seconds for commands to finish. */
			/* 3000 polls x 10000us delay = 30s. */
			for (cnt = 3000; cnt != 0; cnt--) {
				/* Acquire device queue lock. */
				DEVICE_QUEUE_LOCK(tq);
				if (tq->outcnt == 0) {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					break;
				} else {
					/* Release device queue lock. */
					DEVICE_QUEUE_UNLOCK(tq);
					ql_delay(ha, 10000);
				}
			}

			/* Finish any commands waiting for more status. */
			if (ha->status_srb != NULL) {
				sp = ha->status_srb;
				ha->status_srb = NULL;
				sp->cmd.next = NULL;
				ql_done(&sp->cmd);
			}

			/*
			 * Abort commands that did not finish.
			 * cnt == 0 means the 30 second poll above
			 * timed out with commands still outstanding.
			 */
			if (cnt == 0) {
				for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS;
				    cnt++) {
					if (ha->pending_cmds.first != NULL) {
						ql_start_iocb(ha, NULL);
						/* restart the scan */
						cnt = 1;
					}
					sp = ha->outstanding_cmds[cnt];
					if (sp != NULL &&
					    sp->lun_queue->target_queue ==
					    tq) {
						(void) ql_abort((opaque_t)ha,
						    sp->pkt, 0);
					}
				}
			}
		}
	}

	/* Shutdown IP. */
	if (ha->flags & IP_INITIALIZED) {
		(void) ql_shutdown_ip(ha);
	}

	/* Stop all timers. */
	ADAPTER_STATE_LOCK(ha);
	ha->port_retry_timer = 0;
	ha->loop_down_timer = LOOP_DOWN_TIMER_OFF;
	ha->watchdog_timer = 0;
	ADAPTER_STATE_UNLOCK(ha);

	if (pwr == PM_LEVEL_D3) {
		/* Going to power-off: mark offline and reset the chip. */
		ADAPTER_STATE_LOCK(ha);
		ha->flags &= ~ONLINE;
		ADAPTER_STATE_UNLOCK(ha);

		/* Reset ISP chip. */
		ql_reset_chip(ha);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
15447 * ql_get_dma_mem
15448 * Function used to allocate dma memory.
15450 * Input:
15451 * ha: adapter state pointer.
15452 * mem: pointer to dma memory object.
15453 * size: size of the request in bytes
15455 * Returns:
15456 * qn local function return status code.
15458 * Context:
15459 * Kernel context.
15462 ql_get_dma_mem(ql_adapter_state_t *ha, dma_mem_t *mem, uint32_t size,
15463 mem_alloc_type_t allocation_type, mem_alignment_t alignment)
15465 int rval;
15467 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15469 mem->size = size;
15470 mem->type = allocation_type;
15471 mem->cookie_count = 1;
15473 switch (alignment) {
15474 case QL_DMA_DATA_ALIGN:
15475 mem->alignment = QL_DMA_ALIGN_8_BYTE_BOUNDARY;
15476 break;
15477 case QL_DMA_RING_ALIGN:
15478 mem->alignment = QL_DMA_ALIGN_64_BYTE_BOUNDARY;
15479 break;
15480 default:
15481 EL(ha, "failed, unknown alignment type %x\n", alignment);
15482 break;
15485 if ((rval = ql_alloc_phys(ha, mem, KM_SLEEP)) != QL_SUCCESS) {
15486 ql_free_phys(ha, mem);
15487 EL(ha, "failed, alloc_phys=%xh\n", rval);
15490 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15492 return (rval);
/*
 * ql_alloc_phys
 *	Function used to allocate memory and zero it.
 *	Memory is below 4 GB.
 *
 * Input:
 *	ha:			adapter state pointer.
 *	mem:			pointer to dma memory object.
 *	sleep:			KM_SLEEP/KM_NOSLEEP flag.
 *	mem->cookie_count	number of segments allowed.
 *	mem->type		memory allocation type.
 *	mem->size		memory size.
 *	mem->alignment		memory alignment.
 *
 * Returns:
 *	qn local function return status code.
 *
 * Context:
 *	Kernel context.
 */
int
ql_alloc_phys(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
{
	size_t			rlen;
	ddi_dma_attr_t		dma_attr;
	ddi_device_acc_attr_t	acc_attr = ql_dev_acc_attr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
	    ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;

	dma_attr.dma_attr_align = mem->alignment; /* DMA address alignment */
	dma_attr.dma_attr_sgllen = (int)mem->cookie_count;

	/*
	 * Workaround for SUN XMITS buffer must end and start on 8 byte
	 * boundary. Else, hardware will overrun the buffer. Simple fix is
	 * to make sure buffer has enough room for overrun.
	 */
	if (mem->size & 7) {
		mem->size += 8 - (mem->size & 7);
	}

	mem->flags = DDI_DMA_CONSISTENT;

	/*
	 * Allocate DMA memory for command.
	 */
	if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
	    DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, ddi_dma_alloc_handle\n");
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	switch (mem->type) {
	case KERNEL_MEM:
		/* Plain kernel heap; bp may be NULL on KM_NOSLEEP failure. */
		mem->bp = kmem_zalloc(mem->size, sleep);
		break;
	case BIG_ENDIAN_DMA:
	case LITTLE_ENDIAN_DMA:
	case NO_SWAP_DMA:
		/* Default acc_attr endianness is little endian. */
		if (mem->type == BIG_ENDIAN_DMA) {
			acc_attr.devacc_attr_endian_flags =
			    DDI_STRUCTURE_BE_ACC;
		} else if (mem->type == NO_SWAP_DMA) {
			acc_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
		}
		if (ddi_dma_mem_alloc(mem->dma_handle, mem->size, &acc_attr,
		    mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
		    DDI_DMA_DONTWAIT, NULL, (caddr_t *)&mem->bp, &rlen,
		    &mem->acc_handle) == DDI_SUCCESS) {
			bzero(mem->bp, mem->size);
			/* ensure we got what we asked for (32bit) */
			if (dma_attr.dma_attr_addr_hi == (uintptr_t)NULL) {
				if (mem->cookie.dmac_notused !=
				    (uintptr_t)NULL) {
					EL(ha, "failed, ddi_dma_mem_alloc "
					    "returned 64 bit DMA address\n");
					ql_free_phys(ha, mem);
					return (QL_MEMORY_ALLOC_FAILED);
				}
			}
		} else {
			mem->acc_handle = NULL;
			mem->bp = NULL;
		}
		break;
	default:
		EL(ha, "failed, unknown type=%xh\n", mem->type);
		mem->acc_handle = NULL;
		mem->bp = NULL;
		break;
	}

	if (mem->bp == NULL) {
		EL(ha, "failed, ddi_dma_mem_alloc\n");
		ddi_dma_free_handle(&mem->dma_handle);
		mem->dma_handle = NULL;
		return (QL_MEMORY_ALLOC_FAILED);
	}

	mem->flags |= DDI_DMA_RDWR;

	if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
		EL(ha, "failed, ddi_dma_addr_bind_handle\n");
		ql_free_phys(ha, mem);
		return (QL_MEMORY_ALLOC_FAILED);
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15613 * ql_free_phys
15614 * Function used to free physical memory.
15616 * Input:
15617 * ha: adapter state pointer.
15618 * mem: pointer to dma memory object.
15620 * Context:
15621 * Kernel context.
15623 void
15624 ql_free_phys(ql_adapter_state_t *ha, dma_mem_t *mem)
15626 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15628 if (mem != NULL && mem->dma_handle != NULL) {
15629 ql_unbind_dma_buffer(ha, mem);
15630 switch (mem->type) {
15631 case KERNEL_MEM:
15632 if (mem->bp != NULL) {
15633 kmem_free(mem->bp, mem->size);
15635 break;
15636 case LITTLE_ENDIAN_DMA:
15637 case BIG_ENDIAN_DMA:
15638 case NO_SWAP_DMA:
15639 if (mem->acc_handle != NULL) {
15640 ddi_dma_mem_free(&mem->acc_handle);
15641 mem->acc_handle = NULL;
15643 break;
15644 default:
15645 break;
15647 mem->bp = NULL;
15648 ddi_dma_free_handle(&mem->dma_handle);
15649 mem->dma_handle = NULL;
15652 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15656 * ql_alloc_dma_resouce.
15657 * Allocates DMA resource for buffer.
15659 * Input:
15660 * ha: adapter state pointer.
15661 * mem: pointer to dma memory object.
15662 * sleep: KM_SLEEP/KM_NOSLEEP flag.
15663 * mem->cookie_count number of segments allowed.
15664 * mem->type memory allocation type.
15665 * mem->size memory size.
15666 * mem->bp pointer to memory or struct buf
15668 * Returns:
15669 * qn local function return status code.
15671 * Context:
15672 * Kernel context.
15675 ql_alloc_dma_resouce(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15677 ddi_dma_attr_t dma_attr;
15679 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15681 dma_attr = CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING) ?
15682 ql_64bit_io_dma_attr : ql_32bit_io_dma_attr;
15683 dma_attr.dma_attr_sgllen = (int)mem->cookie_count;
15686 * Allocate DMA handle for command.
15688 if (ddi_dma_alloc_handle(ha->dip, &dma_attr, (sleep == KM_SLEEP) ?
15689 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->dma_handle) !=
15690 DDI_SUCCESS) {
15691 EL(ha, "failed, ddi_dma_alloc_handle\n");
15692 mem->dma_handle = NULL;
15693 return (QL_MEMORY_ALLOC_FAILED);
15696 mem->flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;
15698 if (ql_bind_dma_buffer(ha, mem, sleep) != DDI_DMA_MAPPED) {
15699 EL(ha, "failed, bind_dma_buffer\n");
15700 ddi_dma_free_handle(&mem->dma_handle);
15701 mem->dma_handle = NULL;
15702 return (QL_MEMORY_ALLOC_FAILED);
15705 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15707 return (QL_SUCCESS);
15711 * ql_free_dma_resource
15712 * Frees DMA resources.
15714 * Input:
15715 * ha: adapter state pointer.
15716 * mem: pointer to dma memory object.
15717 * mem->dma_handle DMA memory handle.
15719 * Context:
15720 * Kernel context.
15722 void
15723 ql_free_dma_resource(ql_adapter_state_t *ha, dma_mem_t *mem)
15725 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15727 ql_free_phys(ha, mem);
15729 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15733 * ql_bind_dma_buffer
15734 * Binds DMA buffer.
15736 * Input:
15737 * ha: adapter state pointer.
15738 * mem: pointer to dma memory object.
15739 * sleep: KM_SLEEP or KM_NOSLEEP.
15740 * mem->dma_handle DMA memory handle.
15741 * mem->cookie_count number of segments allowed.
15742 * mem->type memory allocation type.
15743 * mem->size memory size.
15744 * mem->bp pointer to memory or struct buf
15746 * Returns:
15747 * mem->cookies pointer to list of cookies.
15748 * mem->cookie_count number of cookies.
15749 * status success = DDI_DMA_MAPPED
15750 * DDI_DMA_PARTIAL_MAP, DDI_DMA_INUSE,
15751 * DDI_DMA_NORESOURCES, DDI_DMA_NOMAPPING or
15752 * DDI_DMA_TOOBIG
15754 * Context:
15755 * Kernel context.
15757 static int
15758 ql_bind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem, int sleep)
15760 int rval;
15761 ddi_dma_cookie_t *cookiep;
15762 uint32_t cnt = mem->cookie_count;
15764 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15766 if (mem->type == STRUCT_BUF_MEMORY) {
15767 rval = ddi_dma_buf_bind_handle(mem->dma_handle, mem->bp,
15768 mem->flags, (sleep == KM_SLEEP) ? DDI_DMA_SLEEP :
15769 DDI_DMA_DONTWAIT, NULL, &mem->cookie, &mem->cookie_count);
15770 } else {
15771 rval = ddi_dma_addr_bind_handle(mem->dma_handle, NULL, mem->bp,
15772 mem->size, mem->flags, (sleep == KM_SLEEP) ?
15773 DDI_DMA_SLEEP : DDI_DMA_DONTWAIT, NULL, &mem->cookie,
15774 &mem->cookie_count);
15777 if (rval == DDI_DMA_MAPPED) {
15778 if (mem->cookie_count > cnt) {
15779 (void) ddi_dma_unbind_handle(mem->dma_handle);
15780 EL(ha, "failed, cookie_count %d > %d\n",
15781 mem->cookie_count, cnt);
15782 rval = DDI_DMA_TOOBIG;
15783 } else {
15784 if (mem->cookie_count > 1) {
15785 if (mem->cookies = kmem_zalloc(
15786 sizeof (ddi_dma_cookie_t) *
15787 mem->cookie_count, sleep)) {
15788 *mem->cookies = mem->cookie;
15789 cookiep = mem->cookies;
15790 for (cnt = 1; cnt < mem->cookie_count;
15791 cnt++) {
15792 ddi_dma_nextcookie(
15793 mem->dma_handle,
15794 ++cookiep);
15796 } else {
15797 (void) ddi_dma_unbind_handle(
15798 mem->dma_handle);
15799 EL(ha, "failed, kmem_zalloc\n");
15800 rval = DDI_DMA_NORESOURCES;
15802 } else {
15804 * It has been reported that dmac_size at times
15805 * may be incorrect on sparc machines so for
15806 * sparc machines that only have one segment
15807 * use the buffer size instead.
15809 mem->cookies = &mem->cookie;
15810 mem->cookies->dmac_size = mem->size;
15815 if (rval != DDI_DMA_MAPPED) {
15816 EL(ha, "failed=%xh\n", rval);
15817 } else {
15818 /*EMPTY*/
15819 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
15822 return (rval);
15826 * ql_unbind_dma_buffer
15827 * Unbinds DMA buffer.
15829 * Input:
15830 * ha: adapter state pointer.
15831 * mem: pointer to dma memory object.
15832 * mem->dma_handle DMA memory handle.
15833 * mem->cookies pointer to cookie list.
15834 * mem->cookie_count number of cookies.
15836 * Context:
15837 * Kernel context.
15839 /* ARGSUSED */
15840 static void
15841 ql_unbind_dma_buffer(ql_adapter_state_t *ha, dma_mem_t *mem)
15843 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
15845 (void) ddi_dma_unbind_handle(mem->dma_handle);
15846 if (mem->cookie_count > 1) {
15847 kmem_free(mem->cookies, sizeof (ddi_dma_cookie_t) *
15848 mem->cookie_count);
15849 mem->cookies = NULL;
15851 mem->cookie_count = 0;
15853 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
/*
 * ql_suspend_adapter
 *	Quiesces the adapter for suspend: claims mailbox ownership,
 *	drains outstanding commands, halts the adapter and disables
 *	its interrupts.
 *
 * Input:
 *	ha:	adapter state pointer.
 *
 * Returns:
 *	QL_SUCCESS or QL_FUNCTION_TIMEOUT.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_suspend_adapter(ql_adapter_state_t *ha)
{
	/* 32 seconds in ticks; cv wait bound below. */
	clock_t timer = 32 * drv_usectohz(1000000);

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * First we will claim mbox ownership so that no
	 * thread using mbox hangs when we disable the
	 * interrupt in the middle of it.
	 */
	MBX_REGISTER_LOCK(ha);

	/* Check for mailbox available, if not wait for signal. */
	while (ha->mailbox_flags & MBX_BUSY_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags | MBX_WANT_FLG);

		/* 30 seconds from now */
		if (cv_reltimedwait(&ha->cv_mbx_wait, &ha->mbx_mutex,
		    timer, TR_CLOCK_TICK) == -1) {
			/* Timed out without the mailbox being released. */

			/* Release mailbox register lock. */
			MBX_REGISTER_UNLOCK(ha);

			EL(ha, "failed, Suspend mbox");
			return (QL_FUNCTION_TIMEOUT);
		}
	}

	/* Set busy flag. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags | MBX_BUSY_FLG);
	MBX_REGISTER_UNLOCK(ha);

	(void) ql_wait_outstanding(ha);

	/*
	 * here we are sure that there will not be any mbox interrupt.
	 * So, let's make sure that we return back all the outstanding
	 * cmds as well as internally queued commands.
	 */
	ql_halt(ha, PM_LEVEL_D0);

	if (ha->power_level != PM_LEVEL_D3) {
		/* Disable ISP interrupts. */
		WRT16_IO_REG(ha, ictrl, 0);
	}

	ADAPTER_STATE_LOCK(ha);
	ha->flags &= ~INTERRUPTS_ENABLED;
	ADAPTER_STATE_UNLOCK(ha);

	MBX_REGISTER_LOCK(ha);
	/* Reset busy status. */
	ha->mailbox_flags = (uint8_t)(ha->mailbox_flags & ~MBX_BUSY_FLG);

	/* If thread is waiting for mailbox go signal it to start. */
	if (ha->mailbox_flags & MBX_WANT_FLG) {
		ha->mailbox_flags = (uint8_t)
		    (ha->mailbox_flags & ~MBX_WANT_FLG);
		cv_broadcast(&ha->cv_mbx_wait);
	}
	/* Release mailbox register lock. */
	MBX_REGISTER_UNLOCK(ha);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (QL_SUCCESS);
}
15927 * ql_add_link_b
15928 * Add link to the end of the chain.
15930 * Input:
15931 * head = Head of link list.
15932 * link = link to be added.
15933 * LOCK must be already obtained.
15935 * Context:
15936 * Interrupt or Kernel context, no mailbox commands allowed.
15938 void
15939 ql_add_link_b(ql_head_t *head, ql_link_t *link)
15941 /* at the end there isn't a next */
15942 link->next = NULL;
15944 if ((link->prev = head->last) == NULL) {
15945 head->first = link;
15946 } else {
15947 head->last->next = link;
15950 head->last = link;
15951 link->head = head; /* the queue we're on */
15955 * ql_add_link_t
15956 * Add link to the beginning of the chain.
15958 * Input:
15959 * head = Head of link list.
15960 * link = link to be added.
15961 * LOCK must be already obtained.
15963 * Context:
15964 * Interrupt or Kernel context, no mailbox commands allowed.
15966 void
15967 ql_add_link_t(ql_head_t *head, ql_link_t *link)
15969 link->prev = NULL;
15971 if ((link->next = head->first) == NULL) {
15972 head->last = link;
15973 } else {
15974 head->first->prev = link;
15977 head->first = link;
15978 link->head = head; /* the queue we're on */
15982 * ql_remove_link
15983 * Remove a link from the chain.
15985 * Input:
15986 * head = Head of link list.
15987 * link = link to be removed.
15988 * LOCK must be already obtained.
15990 * Context:
15991 * Interrupt or Kernel context, no mailbox commands allowed.
15993 void
15994 ql_remove_link(ql_head_t *head, ql_link_t *link)
15996 if (link->prev != NULL) {
15997 if ((link->prev->next = link->next) == NULL) {
15998 head->last = link->prev;
15999 } else {
16000 link->next->prev = link->prev;
16002 } else if ((head->first = link->next) == NULL) {
16003 head->last = NULL;
16004 } else {
16005 head->first->prev = NULL;
16008 /* not on a queue any more */
16009 link->prev = link->next = NULL;
16010 link->head = NULL;
/*
 * ql_chg_endian
 *	Change endianess of byte array by reversing it in place.
 *
 * Input:
 *	buf = array pointer.
 *	size = size of array in bytes.
 *
 * Context:
 *	Interrupt or Kernel context, no mailbox commands allowed.
 */
void
ql_chg_endian(uint8_t buf[], size_t size)
{
	size_t	lo, hi;
	uint8_t	tmp;

	/* Arrays of fewer than two bytes are already "reversed". */
	if (size < 2) {
		return;
	}

	/* Swap symmetric pairs working inward from both ends. */
	for (lo = 0, hi = size - 1; lo < hi; lo++, hi--) {
		tmp = buf[lo];
		buf[lo] = buf[hi];
		buf[hi] = tmp;
	}
}
/*
 * ql_bstr_to_dec
 *	Convert decimal byte string to number.
 *
 *	Each digit is weighted positionally by the number of ascii
 *	bytes still expected, so when 'size' exceeds the actual digit
 *	count the result is scaled accordingly (historic behavior).
 *
 * Input:
 *	s:	byte string pointer.
 *	ans:	integer pointer for number.
 *	size:	number of ascii bytes, or 0 to count leading digits.
 *
 * Returns:
 *	success = number of ascii bytes processed.
 *
 * Context:
 *	Kernel/Interrupt context.
 */
static int
ql_bstr_to_dec(char *s, uint32_t *ans, uint32_t size)
{
	int	processed = 0;
	int	weight, digit, place;
	char	*scan;

	/* When no length was supplied, count the leading decimal digits. */
	if (size == 0) {
		for (scan = s; *scan >= '0' && *scan <= '9'; scan++) {
			size++;
		}
	}

	*ans = 0;
	while (*s != '\0' && size != 0) {
		if (*s < '0' || *s > '9') {
			break;
		}
		digit = *s++ - '0';

		/* Weight of this digit is 10^(remaining bytes - 1). */
		weight = 1;
		for (place = 1; place < size; place++) {
			weight *= 10;
		}
		*ans += digit * weight;

		size--;
		processed++;
	}

	return (processed);
}
16086 * ql_delay
16087 * Calls delay routine if threads are not suspended, otherwise, busy waits
16088 * Minimum = 1 tick = 10ms
16090 * Input:
16091 * dly = delay time in microseconds.
16093 * Context:
16094 * Kernel or Interrupt context, no mailbox commands allowed.
16096 void
16097 ql_delay(ql_adapter_state_t *ha, clock_t usecs)
16099 if (QL_DAEMON_SUSPENDED(ha) || ddi_in_panic()) {
16100 drv_usecwait(usecs);
16101 } else {
16102 delay(drv_usectohz(usecs));
16107 * ql_stall_drv
16108 * Stalls one or all driver instances, waits for 30 seconds.
16110 * Input:
16111 * ha: adapter state pointer or NULL for all.
16112 * options: BIT_0 --> leave driver stalled on exit if
16113 * failed.
16115 * Returns:
16116 * ql local function return status code.
16118 * Context:
16119 * Kernel context.
16122 ql_stall_driver(ql_adapter_state_t *ha, uint32_t options)
16124 ql_link_t *link;
16125 ql_adapter_state_t *ha2;
16126 uint32_t timer;
16128 QL_PRINT_3(CE_CONT, "started\n");
16130 /* Wait for 30 seconds for daemons unstall. */
16131 timer = 3000;
16132 link = ha == NULL ? ql_hba.first : &ha->hba;
16133 while (link != NULL && timer) {
16134 ha2 = link->base_address;
16136 ql_awaken_task_daemon(ha2, NULL, DRIVER_STALL, 0);
16138 if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
16139 (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
16140 (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG &&
16141 ql_wait_outstanding(ha2) == MAX_OUTSTANDING_COMMANDS)) {
16142 link = ha == NULL ? link->next : NULL;
16143 continue;
16146 ql_delay(ha2, 10000);
16147 timer--;
16148 link = ha == NULL ? ql_hba.first : &ha->hba;
16151 if (ha2 != NULL && timer == 0) {
16152 EL(ha2, "failed, tdf=%xh, exiting state is: %s\n",
16153 ha2->task_daemon_flags, (options & BIT_0 ? "stalled" :
16154 "unstalled"));
16155 if (options & BIT_0) {
16156 ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);
16158 return (QL_FUNCTION_TIMEOUT);
16161 QL_PRINT_3(CE_CONT, "done\n");
16163 return (QL_SUCCESS);
/*
 * ql_restart_driver
 *	Restarts one or all driver instances.
 *
 * Input:
 *	ha:	adapter state pointer or NULL for all.
 *
 * Context:
 *	Kernel context.
 */
void
ql_restart_driver(ql_adapter_state_t *ha)
{
	ql_link_t		*link;
	ql_adapter_state_t	*ha2;
	uint32_t		timer;

	QL_PRINT_3(CE_CONT, "started\n");

	/* Tell all daemons to unstall. */
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL) {
		ha2 = link->base_address;

		ql_awaken_task_daemon(ha2, NULL, 0, DRIVER_STALL);

		link = ha == NULL ? link->next : NULL;
	}

	/* Wait for 30 seconds for all daemons unstall. */
	/* 3000 polls x 10000us delay = 30s. */
	timer = 3000;
	link = ha == NULL ? ql_hba.first : &ha->hba;
	while (link != NULL && timer) {
		ha2 = link->base_address;

		/* Daemon dead, stopping, or no longer stalled: restarted. */
		if ((ha2->task_daemon_flags & TASK_DAEMON_ALIVE_FLG) == 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STOP_FLG) != 0 ||
		    (ha2->task_daemon_flags & TASK_DAEMON_STALLED_FLG) == 0) {
			QL_PRINT_2(CE_CONT, "(%d,%d): restarted\n",
			    ha2->instance, ha2->vp_index);
			ql_restart_queues(ha2);
			link = ha == NULL ? link->next : NULL;
			continue;
		}

		QL_PRINT_2(CE_CONT, "(%d,%d): failed, tdf=%xh\n",
		    ha2->instance, ha2->vp_index, ha2->task_daemon_flags);

		ql_delay(ha2, 10000);
		timer--;
		/* Restart the scan from the head after waiting. */
		link = ha == NULL ? ql_hba.first : &ha->hba;
	}

	QL_PRINT_3(CE_CONT, "done\n");
}
16223 * ql_setup_interrupts
16224 * Sets up interrupts based on the HBA's and platform's
16225 * capabilities (e.g., legacy / MSI / FIXED).
16227 * Input:
16228 * ha = adapter state pointer.
16230 * Returns:
16231 * DDI_SUCCESS or DDI_FAILURE.
16233 * Context:
16234 * Kernel context.
16236 static int
16237 ql_setup_interrupts(ql_adapter_state_t *ha)
16239 int32_t rval = DDI_FAILURE;
16240 int32_t i;
16241 int32_t itypes = 0;
16243 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16246 * The Solaris Advanced Interrupt Functions (aif) are only
16247 * supported on s10U1 or greater.
16249 if (ql_os_release_level < 10 || ql_disable_aif != 0) {
16250 EL(ha, "interrupt framework is not supported or is "
16251 "disabled, using legacy\n");
16252 return (ql_legacy_intr(ha));
16253 } else if (ql_os_release_level == 10) {
16255 * See if the advanced interrupt functions (aif) are
16256 * in the kernel
16258 void *fptr = (void *)&ddi_intr_get_supported_types;
16260 if (fptr == NULL) {
16261 EL(ha, "aif is not supported, using legacy "
16262 "interrupts (rev)\n");
16263 return (ql_legacy_intr(ha));
16267 /* See what types of interrupts this HBA and platform support */
16268 if ((i = ddi_intr_get_supported_types(ha->dip, &itypes)) !=
16269 DDI_SUCCESS) {
16270 EL(ha, "get supported types failed, rval=%xh, "
16271 "assuming FIXED\n", i);
16272 itypes = DDI_INTR_TYPE_FIXED;
16275 EL(ha, "supported types are: %xh\n", itypes);
16277 if ((itypes & DDI_INTR_TYPE_MSIX) &&
16278 (rval = ql_setup_msix(ha)) == DDI_SUCCESS) {
16279 EL(ha, "successful MSI-X setup\n");
16280 } else if ((itypes & DDI_INTR_TYPE_MSI) &&
16281 (rval = ql_setup_msi(ha)) == DDI_SUCCESS) {
16282 EL(ha, "successful MSI setup\n");
16283 } else {
16284 rval = ql_setup_fixed(ha);
16287 if (rval != DDI_SUCCESS) {
16288 EL(ha, "failed, aif, rval=%xh\n", rval);
16289 } else {
16290 /*EMPTY*/
16291 QL_PRINT_3(CE_CONT, "(%d): done\n");
16294 return (rval);
/*
 * ql_setup_msi
 *	Set up aif MSI interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_msi(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSI;
	int32_t		ret;
	ql_ifunc_t	itrfun[10] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msi != 0) {
		EL(ha, "MSI is disabled by user\n");
		return (DDI_FAILURE);
	}

	/* MSI support is only supported on 24xx HBA's. */
	if (!(CFG_IST(ha, CFG_CTRL_24258081))) {
		EL(ha, "HBA does not support MSI\n");
		return (DDI_FAILURE);
	}

	/* Get number of MSI interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available MSI interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* MSI requires only 1. */
	count = 1;
	itrfun[0].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	ha->iflags |= IFLG_INTR_MSI;

	/* Allocate the interrupts */
	if ((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype, 0, count,
	    &actual, 0)) != DDI_SUCCESS || actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handler */
	if ((ret = ddi_intr_add_handler(ha->htable[0], itrfun[0].ifunc,
	    (caddr_t)ha, (caddr_t)0)) != DDI_SUCCESS) {
		EL(ha, "failed, intr_add ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		if ((ret = ddi_intr_enable(ha->htable[0])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
/*
 * ql_setup_msix
 *	Set up aif MSI-X interrupts
 *
 * Input:
 *	ha = adapter state pointer.
 *
 * Returns:
 *	DDI_SUCCESS or DDI_FAILURE.
 *
 * Context:
 *	Kernel context.
 */
static int
ql_setup_msix(ql_adapter_state_t *ha)
{
	uint16_t	hwvect;
	int32_t		count = 0;
	int32_t		avail = 0;
	int32_t		actual = 0;
	int32_t		msitype = DDI_INTR_TYPE_MSIX;
	int32_t		ret;
	uint32_t	i;
	ql_ifunc_t	itrfun[QL_MSIX_MAXAIF] = {0};

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (ql_disable_msix != 0) {
		EL(ha, "MSI-X is disabled by user\n");
		return (DDI_FAILURE);
	}

	/*
	 * MSI-X support is only available on 24xx HBA's that have
	 * rev A2 parts (revid = 3) or greater.
	 */
	if (!((ha->device_id == 0x2532) || (ha->device_id == 0x2432) ||
	    (ha->device_id == 0x8432) || (ha->device_id == 0x8001) ||
	    (ha->device_id == 0x8021))) {
		EL(ha, "HBA does not support MSI-X\n");
		return (DDI_FAILURE);
	}

	if (CFG_IST(ha, CFG_CTRL_2422) && (ha->rev_id < 3)) {
		EL(ha, "HBA does not support MSI-X (revid)\n");
		return (DDI_FAILURE);
	}

	/* Per HP, these HP branded HBA's are not supported with MSI-X */
	if (ha->ven_id == 0x103C && (ha->subsys_id == 0x7041 ||
	    ha->subsys_id == 0x7040 || ha->subsys_id == 0x1705)) {
		EL(ha, "HBA does not support MSI-X (subdevid)\n");
		return (DDI_FAILURE);
	}

	/* Get the number of 24xx/25xx MSI-X h/w vectors */
	/* Table-size field read straight from PCIe config space. */
	hwvect = (uint16_t)(((CFG_IST(ha, CFG_CTRL_2422) ?
	    ql_pci_config_get16(ha, 0x7e) :
	    ql_pci_config_get16(ha, 0xa2)) & 0x3ff) + 1);

	EL(ha, "pcie config space hwvect = %d\n", hwvect);

	if (hwvect < QL_MSIX_MAXAIF) {
		EL(ha, "failed, min h/w vectors req'd: %d, avail: %d\n",
		    QL_MSIX_MAXAIF, hwvect);
		return (DDI_FAILURE);
	}

	/* Get number of MSI-X interrupts the platform h/w supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, msitype, &count)) !=
	    DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Get number of available system interrupts */
	if (((ret = ddi_intr_get_navail(ha->dip, msitype, &avail)) !=
	    DDI_SUCCESS) || avail == 0) {
		EL(ha, "failed, navail ret=%xh, avail=%xh\n", ret, avail);
		return (DDI_FAILURE);
	}

	/* Fill out the intr table */
	count = QL_MSIX_MAXAIF;
	itrfun[QL_MSIX_AIF].ifunc = &ql_isr_aif;
	itrfun[QL_MSIX_RSPQ].ifunc = &ql_isr_aif;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * hwvect);
	/*
	 * NOTE(review): kmem_zalloc with KM_SLEEP never returns NULL,
	 * so this failure branch appears unreachable — confirm before
	 * removing.
	 */
	if ((ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP)) == NULL) {
		ha->hsize = 0;
		EL(ha, "failed, unable to allocate htable space\n");
		return (DDI_FAILURE);
	}

	ha->iflags |= IFLG_INTR_MSIX;

	/* Allocate the interrupts */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, msitype,
	    DDI_INTR_ALLOC_NORMAL, count, &actual, 0)) != DDI_SUCCESS) ||
	    actual < QL_MSIX_MAXAIF) {
		EL(ha, "failed, intr_alloc ret=%xh, count = %xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers */
	for (i = 0; i < actual; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], itrfun[i].ifunc,
		    (void *)ha, (void *)((ulong_t)i))) != DDI_SUCCESS) {
			EL(ha, "failed, addh#=%xh, act=%xh, ret=%xh\n", i,
			    actual, ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/*
	 * duplicate the rest of the intr's
	 * ddi_intr_dup_handler() isn't working on x86 just yet...
	 */

	/* Setup mutexes */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Get the capabilities */
	(void) ddi_intr_get_cap(ha->htable[0], &ha->intr_cap);

	/* Enable interrupts */
	if (ha->intr_cap & DDI_INTR_FLAG_BLOCK) {
		if ((ret = ddi_intr_block_enable(ha->htable, ha->intr_cnt)) !=
		    DDI_SUCCESS) {
			EL(ha, "failed, block enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	} else {
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((ret = ddi_intr_enable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed, intr enable, ret=%xh\n", ret);
				ql_destroy_mutex(ha);
				ql_release_intr(ha);
				return (ret);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16587 * ql_setup_fixed
16588 * Sets up aif FIXED interrupts
16590 * Input:
16591 * ha = adapter state pointer.
16593 * Returns:
16594 * DDI_SUCCESS or DDI_FAILURE.
16596 * Context:
16597 * Kernel context.
static int
ql_setup_fixed(ql_adapter_state_t *ha)
{
	int32_t		count = 0;
	int32_t		actual = 0;
	int32_t		ret;
	uint32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Get number of fixed interrupts the system supports */
	if (((ret = ddi_intr_get_nintrs(ha->dip, DDI_INTR_TYPE_FIXED,
	    &count)) != DDI_SUCCESS) || count == 0) {
		EL(ha, "failed, nintrs ret=%xh, cnt=%xh\n", ret, count);
		return (DDI_FAILURE);
	}

	/* Record the interrupt mode before allocating, so that */
	/* ql_release_intr() knows what to undo on any error path below. */
	ha->iflags |= IFLG_INTR_FIXED;

	/* Allocate space for interrupt handles */
	ha->hsize = ((uint32_t)(sizeof (ddi_intr_handle_t)) * count);
	ha->htable = kmem_zalloc(ha->hsize, KM_SLEEP);

	/* Allocate the interrupts; STRICT means all-or-nothing */
	if (((ret = ddi_intr_alloc(ha->dip, ha->htable, DDI_INTR_TYPE_FIXED,
	    0, count, &actual, DDI_INTR_ALLOC_STRICT)) != DDI_SUCCESS) ||
	    actual < count) {
		EL(ha, "failed, intr_alloc ret=%xh, count=%xh, "
		    "actual=%xh\n", ret, count, actual);
		ql_release_intr(ha);
		return (DDI_FAILURE);
	}

	ha->intr_cnt = actual;

	/* Get interrupt priority (used later by ql_init_mutex()) */
	if ((ret = ddi_intr_get_pri(ha->htable[0], &ha->intr_pri)) !=
	    DDI_SUCCESS) {
		EL(ha, "failed, get_pri ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Add the interrupt handlers; the handler arg2 is the vector index */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_add_handler(ha->htable[i], &ql_isr_aif,
		    (void *)ha, (void *)((ulong_t)(i)))) != DDI_SUCCESS) {
			EL(ha, "failed, intr_add ret=%xh\n", ret);
			ql_release_intr(ha);
			return (ret);
		}
	}

	/* Setup mutexes (must follow ddi_intr_get_pri()) */
	if ((ret = ql_init_mutex(ha)) != DDI_SUCCESS) {
		EL(ha, "failed, mutex init ret=%xh\n", ret);
		ql_release_intr(ha);
		return (ret);
	}

	/* Enable interrupts */
	for (i = 0; i < ha->intr_cnt; i++) {
		if ((ret = ddi_intr_enable(ha->htable[i])) != DDI_SUCCESS) {
			EL(ha, "failed, intr enable, ret=%xh\n", ret);
			ql_destroy_mutex(ha);
			ql_release_intr(ha);
			return (ret);
		}
	}

	EL(ha, "using FIXED interupts\n");

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16677 * ql_disable_intr
16678 * Disables interrupts
16680 * Input:
16681 * ha = adapter state pointer.
16683 * Returns:
16685 * Context:
16686 * Kernel context.
static void
ql_disable_intr(ql_adapter_state_t *ha)
{
	uint32_t	i, rval;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	if (!(ha->iflags & IFLG_INTR_AIF)) {

		/* Disable legacy interrupts */
		(void) ddi_remove_intr(ha->dip, 0, ha->iblock_cookie);

	} else if ((ha->intr_cap & DDI_INTR_FLAG_BLOCK) &&
	    (ha->iflags & (IFLG_INTR_MSI | IFLG_INTR_MSIX))) {

		/* Remove AIF block interrupts (MSI) */
		/* Block-capable vectors are disabled as a group. */
		if ((rval = ddi_intr_block_disable(ha->htable, ha->intr_cnt))
		    != DDI_SUCCESS) {
			/* Best-effort: log and continue, nothing to unwind. */
			EL(ha, "failed intr block disable, rval=%x\n", rval);
		}

	} else {

		/* Remove AIF non-block interrupts (fixed). */
		for (i = 0; i < ha->intr_cnt; i++) {
			if ((rval = ddi_intr_disable(ha->htable[i])) !=
			    DDI_SUCCESS) {
				EL(ha, "failed intr disable, intr#=%xh, "
				    "rval=%xh\n", i, rval);
			}
		}
	}

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16725 * ql_release_intr
16726 * Releases aif legacy interrupt resources
16728 * Input:
16729 * ha = adapter state pointer.
16731 * Returns:
16733 * Context:
16734 * Kernel context.
static void
ql_release_intr(ql_adapter_state_t *ha)
{
	int32_t	i;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Legacy (non-AIF) interrupts are not released here. */
	if (!(ha->iflags & IFLG_INTR_AIF)) {
		QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
		return;
	}

	ha->iflags &= ~(IFLG_INTR_AIF);
	if (ha->htable != NULL && ha->hsize > 0) {
		/* hsize was sized as handle-count * sizeof (handle). */
		i = (int32_t)ha->hsize / (int32_t)sizeof (ddi_intr_handle_t);
		while (i-- > 0) {
			if (ha->htable[i] == 0) {
				/* Never allocated (partial setup failure). */
				EL(ha, "htable[%x]=0h\n", i);
				continue;
			}

			(void) ddi_intr_disable(ha->htable[i]);

			/* Handlers were only added for the first intr_cnt. */
			if (i < ha->intr_cnt) {
				(void) ddi_intr_remove_handler(ha->htable[i]);
			}

			(void) ddi_intr_free(ha->htable[i]);
		}

		kmem_free(ha->htable, ha->hsize);
		ha->htable = NULL;
	}

	/* Reset all AIF bookkeeping for a clean re-setup. */
	ha->hsize = 0;
	ha->intr_cnt = 0;
	ha->intr_pri = 0;
	ha->intr_cap = 0;

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16779 * ql_legacy_intr
16780 * Sets up legacy interrupts.
16782 * NB: Only to be used if AIF (Advanced Interupt Framework)
16783 * if NOT in the kernel.
16785 * Input:
16786 * ha = adapter state pointer.
16788 * Returns:
16789 * DDI_SUCCESS or DDI_FAILURE.
16791 * Context:
16792 * Kernel context.
16794 static int
16795 ql_legacy_intr(ql_adapter_state_t *ha)
16797 int rval = DDI_SUCCESS;
16799 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16801 /* Setup mutexes */
16802 if (ql_init_mutex(ha) != DDI_SUCCESS) {
16803 EL(ha, "failed, mutex init\n");
16804 return (DDI_FAILURE);
16807 /* Setup standard/legacy interrupt handler */
16808 if (ddi_add_intr(ha->dip, 0, &ha->iblock_cookie,
16809 (ddi_idevice_cookie_t *)0, ql_isr, (caddr_t)ha) != DDI_SUCCESS) {
16810 cmn_err(CE_WARN, "%s(%d): Failed to add legacy interrupt",
16811 QL_NAME, ha->instance);
16812 ql_destroy_mutex(ha);
16813 rval = DDI_FAILURE;
16816 if (rval == DDI_SUCCESS) {
16817 ha->iflags |= IFLG_INTR_LEGACY;
16818 EL(ha, "using legacy interrupts\n");
16821 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
16823 return (rval);
16827 * ql_init_mutex
16828 * Initializes mutex's
16830 * Input:
16831 * ha = adapter state pointer.
16833 * Returns:
16834 * DDI_SUCCESS or DDI_FAILURE.
16836 * Context:
16837 * Kernel context.
static int
ql_init_mutex(ql_adapter_state_t *ha)
{
	int	ret;
	void	*intr;

	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/*
	 * Pick the interrupt-priority cookie passed to mutex_init():
	 * AIF uses the priority from ddi_intr_get_pri(); the legacy
	 * path needs an iblock cookie from the DDI.
	 */
	if (ha->iflags & IFLG_INTR_AIF) {
		intr = (void *)(uintptr_t)ha->intr_pri;
	} else {
		/* Get iblock cookies to initialize mutexes */
		if ((ret = ddi_get_iblock_cookie(ha->dip, 0,
		    &ha->iblock_cookie)) != DDI_SUCCESS) {
			EL(ha, "failed, get_iblock: %xh\n", ret);
			return (DDI_FAILURE);
		}
		intr = (void *)ha->iblock_cookie;
	}

	/* mutexes to protect the adapter state structure. */
	mutex_init(&ha->mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the ISP response ring. */
	mutex_init(&ha->intr_mutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect the mailbox registers. */
	mutex_init(&ha->mbx_mutex, NULL, MUTEX_DRIVER, intr);

	/* power management protection */
	mutex_init(&ha->pm_mutex, NULL, MUTEX_DRIVER, intr);

	/* Mailbox wait and interrupt conditional variable. */
	cv_init(&ha->cv_mbx_wait, NULL, CV_DRIVER, NULL);
	cv_init(&ha->cv_mbx_intr, NULL, CV_DRIVER, NULL);

	/* mutex to protect the ISP request ring. */
	mutex_init(&ha->req_ring_mutex, NULL, MUTEX_DRIVER, intr);

	/* Unsolicited buffer conditional variable. */
	cv_init(&ha->cv_ub, NULL, CV_DRIVER, NULL);

	mutex_init(&ha->ub_mutex, NULL, MUTEX_DRIVER, intr);
	mutex_init(&ha->cache_mutex, NULL, MUTEX_DRIVER, intr);

	/* Suspended conditional variable. */
	cv_init(&ha->cv_dr_suspended, NULL, CV_DRIVER, NULL);

	/* mutex to protect task daemon context. */
	mutex_init(&ha->task_daemon_mutex, NULL, MUTEX_DRIVER, intr);

	/* Task_daemon thread conditional variable. */
	cv_init(&ha->cv_task_daemon, NULL, CV_DRIVER, NULL);

	/* mutex to protect diag port manage interface */
	mutex_init(&ha->portmutex, NULL, MUTEX_DRIVER, intr);

	/* mutex to protect per instance f/w dump flags and buffer */
	mutex_init(&ha->dump_mutex, NULL, MUTEX_DRIVER, intr);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);

	return (DDI_SUCCESS);
}
16905 * ql_destroy_mutex
16906 * Destroys mutex's
16908 * Input:
16909 * ha = adapter state pointer.
16911 * Returns:
16913 * Context:
16914 * Kernel context.
static void
ql_destroy_mutex(ql_adapter_state_t *ha)
{
	QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);

	/* Tear down in exactly the reverse order of ql_init_mutex(). */
	mutex_destroy(&ha->dump_mutex);
	mutex_destroy(&ha->portmutex);
	cv_destroy(&ha->cv_task_daemon);
	mutex_destroy(&ha->task_daemon_mutex);
	cv_destroy(&ha->cv_dr_suspended);
	mutex_destroy(&ha->cache_mutex);
	mutex_destroy(&ha->ub_mutex);
	cv_destroy(&ha->cv_ub);
	mutex_destroy(&ha->req_ring_mutex);
	cv_destroy(&ha->cv_mbx_intr);
	cv_destroy(&ha->cv_mbx_wait);
	mutex_destroy(&ha->pm_mutex);
	mutex_destroy(&ha->mbx_mutex);
	mutex_destroy(&ha->intr_mutex);
	mutex_destroy(&ha->mutex);

	QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
}
16941 * ql_fwmodule_resolve
16942 * Loads and resolves external firmware module and symbols
16944 * Input:
16945 * ha: adapter state pointer.
16947 * Returns:
16948 * ql local function return status code:
16949 * QL_SUCCESS - external f/w module module and symbols resolved
16950 * QL_FW_NOT_SUPPORTED - Driver does not support ISP type
16951 * QL_FWMODLOAD_FAILED - Could not load f/w module (ddi failed)
16952 * QL_FWSYM_NOT_FOUND - Unable to resolve internal f/w symbol
16953 * Context:
16954 * Kernel context.
16956 * NOTE: We currently ddi_modopen/ddi_modclose at attach/detach time. We
16957 * could switch to a tighter scope around acutal download (and add an extra
16958 * ddi_modopen for module opens that occur before root is mounted).
16961 uint32_t
16962 ql_fwmodule_resolve(ql_adapter_state_t *ha)
16964 int8_t module[128];
16965 int8_t fw_version[128];
16966 uint32_t rval = QL_SUCCESS;
16967 caddr_t code, code02;
16968 uint8_t *p_ucfw;
16969 uint16_t *p_usaddr, *p_uslen;
16970 uint32_t *p_uiaddr, *p_uilen, *p_uifw;
16971 uint32_t *p_uiaddr02, *p_uilen02;
16972 struct fw_table *fwt;
16973 extern struct fw_table fw_table[];
16975 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
16977 if (ha->fw_module != NULL) {
16978 EL(ha, "%x f/w module %d.%02d.%02d is already loaded\n",
16979 ha->fw_class, ha->fw_major_version, ha->fw_minor_version,
16980 ha->fw_subminor_version);
16981 return (rval);
16984 /* make sure the fw_class is in the fw_table of supported classes */
16985 for (fwt = &fw_table[0]; fwt->fw_version; fwt++) {
16986 if (fwt->fw_class == ha->fw_class)
16987 break; /* match */
16989 if (fwt->fw_version == NULL) {
16990 cmn_err(CE_WARN, "%s(%d): can't find f/w class %x "
16991 "in driver's fw_table", QL_NAME, ha->instance,
16992 ha->fw_class);
16993 return (QL_FW_NOT_SUPPORTED);
16997 * open the module related to the fw_class
16999 (void) snprintf(module, sizeof (module), "misc/qlc/qlc_fw_%x",
17000 ha->fw_class);
17002 ha->fw_module = ddi_modopen(module, KRTLD_MODE_FIRST, NULL);
17003 if (ha->fw_module == NULL) {
17004 cmn_err(CE_WARN, "%s(%d): can't load firmware file %s",
17005 QL_NAME, ha->instance, module);
17006 return (QL_FWMODLOAD_FAILED);
17010 * resolve the fw module symbols, data types depend on fw_class
17013 switch (ha->fw_class) {
17014 case 0x2200:
17015 case 0x2300:
17016 case 0x6322:
17018 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17019 NULL)) == NULL) {
17020 rval = QL_FWSYM_NOT_FOUND;
17021 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17022 } else if ((p_usaddr = ddi_modsym(ha->fw_module,
17023 "risc_code_addr01", NULL)) == NULL) {
17024 rval = QL_FWSYM_NOT_FOUND;
17025 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17026 } else if ((p_uslen = ddi_modsym(ha->fw_module,
17027 "risc_code_length01", NULL)) == NULL) {
17028 rval = QL_FWSYM_NOT_FOUND;
17029 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17030 } else if ((p_ucfw = ddi_modsym(ha->fw_module,
17031 "firmware_version", NULL)) == NULL) {
17032 rval = QL_FWSYM_NOT_FOUND;
17033 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17036 if (rval == QL_SUCCESS) {
17037 ha->risc_fw[0].code = code;
17038 ha->risc_fw[0].addr = *p_usaddr;
17039 ha->risc_fw[0].length = *p_uslen;
17041 (void) snprintf(fw_version, sizeof (fw_version),
17042 "%d.%02d.%02d", p_ucfw[0], p_ucfw[1], p_ucfw[2]);
17044 break;
17046 case 0x2400:
17047 case 0x2500:
17048 case 0x8100:
17050 if ((code = ddi_modsym(ha->fw_module, "risc_code01",
17051 NULL)) == NULL) {
17052 rval = QL_FWSYM_NOT_FOUND;
17053 EL(ha, "failed, f/w module %d rc01 symbol\n", module);
17054 } else if ((p_uiaddr = ddi_modsym(ha->fw_module,
17055 "risc_code_addr01", NULL)) == NULL) {
17056 rval = QL_FWSYM_NOT_FOUND;
17057 EL(ha, "failed, f/w module %d rca01 symbol\n", module);
17058 } else if ((p_uilen = ddi_modsym(ha->fw_module,
17059 "risc_code_length01", NULL)) == NULL) {
17060 rval = QL_FWSYM_NOT_FOUND;
17061 EL(ha, "failed, f/w module %d rcl01 symbol\n", module);
17062 } else if ((p_uifw = ddi_modsym(ha->fw_module,
17063 "firmware_version", NULL)) == NULL) {
17064 rval = QL_FWSYM_NOT_FOUND;
17065 EL(ha, "failed, f/w module %d fwver symbol\n", module);
17068 if ((code02 = ddi_modsym(ha->fw_module, "risc_code02",
17069 NULL)) == NULL) {
17070 rval = QL_FWSYM_NOT_FOUND;
17071 EL(ha, "failed, f/w module %d rc02 symbol\n", module);
17072 } else if ((p_uiaddr02 = ddi_modsym(ha->fw_module,
17073 "risc_code_addr02", NULL)) == NULL) {
17074 rval = QL_FWSYM_NOT_FOUND;
17075 EL(ha, "failed, f/w module %d rca02 symbol\n", module);
17076 } else if ((p_uilen02 = ddi_modsym(ha->fw_module,
17077 "risc_code_length02", NULL)) == NULL) {
17078 rval = QL_FWSYM_NOT_FOUND;
17079 EL(ha, "failed, f/w module %d rcl02 symbol\n", module);
17082 if (rval == QL_SUCCESS) {
17083 ha->risc_fw[0].code = code;
17084 ha->risc_fw[0].addr = *p_uiaddr;
17085 ha->risc_fw[0].length = *p_uilen;
17086 ha->risc_fw[1].code = code02;
17087 ha->risc_fw[1].addr = *p_uiaddr02;
17088 ha->risc_fw[1].length = *p_uilen02;
17090 (void) snprintf(fw_version, sizeof (fw_version),
17091 "%d.%02d.%02d", p_uifw[0], p_uifw[1], p_uifw[2]);
17093 break;
17095 default:
17096 EL(ha, "fw_class: '%x' is not supported\n", ha->fw_class);
17097 rval = QL_FW_NOT_SUPPORTED;
17100 if (rval != QL_SUCCESS) {
17101 cmn_err(CE_WARN, "%s(%d): can't resolve firmware "
17102 "module %s (%x)", QL_NAME, ha->instance, module, rval);
17103 if (ha->fw_module != NULL) {
17104 (void) ddi_modclose(ha->fw_module);
17105 ha->fw_module = NULL;
17107 } else {
17109 * check for firmware version mismatch between module and
17110 * compiled in fw_table version.
17113 if (strcmp(fwt->fw_version, fw_version) != 0) {
17116 * If f/w / driver version mismatches then
17117 * return a successful status -- however warn
17118 * the user that this is NOT recommended.
17121 cmn_err(CE_WARN, "%s(%d): driver / f/w version "
17122 "mismatch for %x: driver-%s module-%s", QL_NAME,
17123 ha->instance, ha->fw_class, fwt->fw_version,
17124 fw_version);
17126 ha->cfg_flags |= CFG_FW_MISMATCH;
17127 } else {
17128 ha->cfg_flags &= ~CFG_FW_MISMATCH;
17132 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17134 return (rval);
17138 * ql_port_state
17139 * Set the state on all adapter ports.
17141 * Input:
17142 * ha: parent adapter state pointer.
17143 * state: port state.
17144 * flags: task daemon flags to set.
17146 * Context:
17147 * Interrupt or Kernel context, no mailbox commands allowed.
17149 void
17150 ql_port_state(ql_adapter_state_t *ha, uint32_t state, uint32_t flags)
17152 ql_adapter_state_t *vha;
17154 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17156 TASK_DAEMON_LOCK(ha);
17157 for (vha = ha->pha; vha != NULL; vha = vha->vp_next) {
17158 if (FC_PORT_STATE_MASK(vha->state) != state) {
17159 vha->state = state != FC_STATE_OFFLINE ?
17160 (FC_PORT_SPEED_MASK(vha->state) | state) : state;
17161 vha->task_daemon_flags |= flags;
17164 ha->pha->task_daemon_flags |= flags & LOOP_DOWN;
17165 TASK_DAEMON_UNLOCK(ha);
17167 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17171 * ql_el_trace_desc_ctor - Construct an extended logging trace descriptor.
17173 * Input: Pointer to the adapter state structure.
17174 * Returns: Success or Failure.
17175 * Context: Kernel context.
17178 ql_el_trace_desc_ctor(ql_adapter_state_t *ha)
17180 int rval = DDI_SUCCESS;
17182 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17184 ha->el_trace_desc =
17185 (el_trace_desc_t *)kmem_zalloc(sizeof (el_trace_desc_t), KM_SLEEP);
17187 if (ha->el_trace_desc == NULL) {
17188 cmn_err(CE_WARN, "%s(%d): can't construct trace descriptor",
17189 QL_NAME, ha->instance);
17190 rval = DDI_FAILURE;
17191 } else {
17192 ha->el_trace_desc->next = 0;
17193 ha->el_trace_desc->trace_buffer =
17194 kmem_zalloc(EL_TRACE_BUF_SIZE, KM_SLEEP);
17196 if (ha->el_trace_desc->trace_buffer == NULL) {
17197 cmn_err(CE_WARN, "%s(%d): can't get trace buffer",
17198 QL_NAME, ha->instance);
17199 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17200 rval = DDI_FAILURE;
17201 } else {
17202 ha->el_trace_desc->trace_buffer_size =
17203 EL_TRACE_BUF_SIZE;
17204 mutex_init(&ha->el_trace_desc->mutex, NULL,
17205 MUTEX_DRIVER, NULL);
17209 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17211 return (rval);
17215 * ql_el_trace_desc_dtor - Destroy an extended logging trace descriptor.
17217 * Input: Pointer to the adapter state structure.
17218 * Returns: Success or Failure.
17219 * Context: Kernel context.
17222 ql_el_trace_desc_dtor(ql_adapter_state_t *ha)
17224 int rval = DDI_SUCCESS;
17226 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17228 if (ha->el_trace_desc == NULL) {
17229 cmn_err(CE_WARN, "%s(%d): can't destroy el trace descriptor",
17230 QL_NAME, ha->instance);
17231 rval = DDI_FAILURE;
17232 } else {
17233 if (ha->el_trace_desc->trace_buffer != NULL) {
17234 kmem_free(ha->el_trace_desc->trace_buffer,
17235 ha->el_trace_desc->trace_buffer_size);
17237 mutex_destroy(&ha->el_trace_desc->mutex);
17238 kmem_free(ha->el_trace_desc, sizeof (el_trace_desc_t));
17241 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17243 return (rval);
17247 * els_cmd_text - Return a pointer to a string describing the command
17249 * Input: els_cmd = the els command opcode.
17250 * Returns: pointer to a string.
17251 * Context: Kernel context.
17253 char *
17254 els_cmd_text(int els_cmd)
17256 cmd_table_t *entry = &els_cmd_tbl[0];
17258 return (cmd_text(entry, els_cmd));
17262 * mbx_cmd_text - Return a pointer to a string describing the command
17264 * Input: mbx_cmd = the mailbox command opcode.
17265 * Returns: pointer to a string.
17266 * Context: Kernel context.
17268 char *
17269 mbx_cmd_text(int mbx_cmd)
17271 cmd_table_t *entry = &mbox_cmd_tbl[0];
17273 return (cmd_text(entry, mbx_cmd));
17277 * cmd_text Return a pointer to a string describing the command
17279 * Input: entry = the command table
17280 * cmd = the command.
17281 * Returns: pointer to a string.
17282 * Context: Kernel context.
17284 char *
17285 cmd_text(cmd_table_t *entry, int cmd)
17287 for (; entry->cmd != 0; entry++) {
17288 if (entry->cmd == cmd) {
17289 break;
17292 return (entry->string);
17296 * ql_els_24xx_mbox_cmd_iocb - els request indication.
17298 * Input: ha = adapter state pointer.
17299 * srb = scsi request block pointer.
17300 * arg = els passthru entry iocb pointer.
17301 * Returns:
17302 * Context: Kernel context.
17304 void
17305 ql_els_24xx_iocb(ql_adapter_state_t *ha, ql_srb_t *srb, void *arg)
17307 els_descriptor_t els_desc;
17309 /* Extract the ELS information */
17310 ql_fca_isp_els_request(ha, (fc_packet_t *)srb->pkt, &els_desc);
17312 /* Construct the passthru entry */
17313 ql_isp_els_request_ctor(&els_desc, (els_passthru_entry_t *)arg);
17315 /* Ensure correct endianness */
17316 ql_isp_els_handle_cmd_endian(ha, srb);
17320 * ql_fca_isp_els_request - Extract into an els descriptor the info required
17321 * to build an els_passthru iocb from an fc packet.
17323 * Input: ha = adapter state pointer.
17324 * pkt = fc packet pointer
17325 * els_desc = els descriptor pointer
17326 * Returns:
17327 * Context: Kernel context.
static void
ql_fca_isp_els_request(ql_adapter_state_t *ha, fc_packet_t *pkt,
    els_descriptor_t *els_desc)
{
	ls_code_t	els;

	/* Read the ELS command code from the packet's command buffer. */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	els_desc->els = els.ls_code;

	els_desc->els_handle = ha->hba_buf.acc_handle;
	els_desc->d_id.b24 = pkt->pkt_cmd_fhdr.d_id;
	els_desc->s_id.b24 = pkt->pkt_cmd_fhdr.s_id;
	/* if n_port_handle is not < 0x7d use 0 */
	if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
		els_desc->n_port_handle = ha->n_port->n_port_handle;
	} else {
		els_desc->n_port_handle = 0;
	}
	els_desc->control_flags = 0;
	els_desc->cmd_byte_count = pkt->pkt_cmdlen;
	/*
	 * Transmit DSD. This field defines the Fibre Channel Frame payload
	 * (without the frame header) in system memory.
	 */
	els_desc->tx_dsd.addr[0] = LSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.addr[1] = MSD(pkt->pkt_cmd_cookie->dmac_laddress);
	els_desc->tx_dsd.length = (uint32_t)pkt->pkt_cmd_cookie->dmac_size;

	els_desc->rsp_byte_count = pkt->pkt_rsplen;
	/*
	 * Receive DSD. This field defines the ELS response payload buffer
	 * for the ISP24xx firmware transferring the received ELS
	 * response frame to a location in host memory.
	 */
	els_desc->rx_dsd.addr[0] = LSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.addr[1] = MSD(pkt->pkt_resp_cookie->dmac_laddress);
	els_desc->rx_dsd.length = (uint32_t)pkt->pkt_resp_cookie->dmac_size;
}
17371 * ql_isp_els_request_ctor - Construct an els_passthru_entry iocb
17372 * using the els descriptor.
17374 * Input: ha = adapter state pointer.
17375 * els_desc = els descriptor pointer.
17376 * els_entry = els passthru entry iocb pointer.
17377 * Returns:
17378 * Context: Kernel context.
static void
ql_isp_els_request_ctor(els_descriptor_t *els_desc,
    els_passthru_entry_t *els_entry)
{
	uint32_t	*ptr32;

	/*
	 * Construct command packet.  All stores go through the DDI access
	 * handle so the IOCB is written in the device's byte order.
	 */
	ddi_put8(els_desc->els_handle, &els_entry->entry_type,
	    (uint8_t)ELS_PASSTHRU_TYPE);
	ddi_put16(els_desc->els_handle, &els_entry->n_port_hdl,
	    els_desc->n_port_handle);
	ddi_put8(els_desc->els_handle, &els_entry->sof_type, (uint8_t)BIT_4);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_exch_address,
	    0);
	ddi_put8(els_desc->els_handle, &els_entry->els_cmd_opcode,
	    els_desc->els);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_7_0,
	    els_desc->d_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_15_8,
	    els_desc->d_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->d_id_23_16,
	    els_desc->d_id.b.domain);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_7_0,
	    els_desc->s_id.b.al_pa);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_15_8,
	    els_desc->s_id.b.area);
	ddi_put8(els_desc->els_handle, &els_entry->s_id_23_16,
	    els_desc->s_id.b.domain);
	ddi_put16(els_desc->els_handle, &els_entry->control_flags,
	    els_desc->control_flags);
	ddi_put32(els_desc->els_handle, &els_entry->rcv_payld_data_bcnt,
	    els_desc->rsp_byte_count);
	ddi_put32(els_desc->els_handle, &els_entry->xmt_payld_data_bcnt,
	    els_desc->cmd_byte_count);
	/* Load transmit data segments and count. */
	ptr32 = (uint32_t *)&els_entry->xmt_dseg_0_address;
	ddi_put16(els_desc->els_handle, &els_entry->xmt_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->tx_dsd.length);
	/* Receive segment immediately follows the transmit segment. */
	ddi_put16(els_desc->els_handle, &els_entry->rcv_dseg_count, 1);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[0]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.addr[1]);
	ddi_put32(els_desc->els_handle, ptr32++, els_desc->rx_dsd.length);
}
17429 * ql_isp_els_handle_cmd_endian - els requests must be in big endian
17430 * in host memory.
17432 * Input: ha = adapter state pointer.
17433 * srb = scsi request block
17434 * Returns:
17435 * Context: Kernel context.
void
ql_isp_els_handle_cmd_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/*
	 * Read the ELS opcode from the command buffer; at this point the
	 * buffer has not yet been byte-swapped (contrast with the rsp
	 * handler, which must swap the already-converted opcode back).
	 */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_cmd;

	/* Swap the command payload in place to big endian. */
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17455 * ql_isp_els_handle_rsp_endian - els responses must be in big endian
17456 * in host memory.
17457 * Input: ha = adapter state pointer.
17458 * srb = scsi request block
17459 * Returns:
17460 * Context: Kernel context.
void
ql_isp_els_handle_rsp_endian(ql_adapter_state_t *ha, ql_srb_t *srb)
{
	ls_code_t	els;
	fc_packet_t	*pkt;
	uint8_t		*ptr;

	pkt = srb->pkt;

	/*
	 * The opcode is taken from the *command* buffer, which was swapped
	 * to big endian when the request was issued.
	 */
	ddi_rep_get8(pkt->pkt_cmd_acc, (uint8_t *)&els,
	    (uint8_t *)pkt->pkt_cmd, sizeof (els), DDI_DEV_AUTOINCR);

	ptr = (uint8_t *)pkt->pkt_resp;
	/* Convert the opcode back to host order before dispatching. */
	BIG_ENDIAN_32(&els);
	/* Swap the response payload in place. */
	ql_isp_els_handle_endian(ha, ptr, els.ls_code);
}
17480 * ql_isp_els_handle_endian - els requests/responses must be in big endian
17481 * in host memory.
17482 * Input: ha = adapter state pointer.
17483 * ptr = els request/response buffer pointer.
17484 * ls_code = els command code.
17485 * Returns:
17486 * Context: Kernel context.
void
ql_isp_els_handle_endian(ql_adapter_state_t *ha, uint8_t *ptr, uint8_t ls_code)
{
	/*
	 * Walk the ELS payload field by field, swapping each multi-byte
	 * field in place.  The ptr increments mirror the FC-PH payload
	 * layout, so the order and step sizes below must not change.
	 */
	switch (ls_code) {
	case LA_ELS_PLOGI: {
		BIG_ENDIAN_32(ptr);	/* Command Code */
		ptr += 4;
		BIG_ENDIAN_16(ptr);	/* FC-PH version */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* b2b credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Cmn Feature flags */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv data size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rel offset */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* E_D_TOV */
		ptr += 4;		/* Port Name */
		ptr += 8;		/* Node Name */
		ptr += 8;		/* Class 1 */
		ptr += 16;		/* Class 2 */
		ptr += 16;		/* Class 3 */
		BIG_ENDIAN_16(ptr);	/* Service options */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Initiator control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Recipient Control */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Rcv size */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Concurrent Seq */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* N_Port e2e credit */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Open Seq/Exch */
		break;
	}
	case LA_ELS_PRLI: {
		BIG_ENDIAN_32(ptr);	/* Command Code/Page length */
		ptr += 4;		/* Type */
		ptr += 2;
		BIG_ENDIAN_16(ptr);	/* Flags */
		ptr += 2;
		BIG_ENDIAN_32(ptr);	/* Originator Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Responder Process associator */
		ptr += 4;
		BIG_ENDIAN_32(ptr);	/* Flags */
		break;
	}
	default:
		/* Other ELS codes have no multi-byte fields handled here. */
		EL(ha, "can't handle els code %x\n", ls_code);
		break;
	}
}
17548 * ql_n_port_plogi
17549 * In N port 2 N port topology where an N Port has logged in with the
17550 * firmware because it has the N_Port login initiative, we send up
17551 * a plogi by proxy which stimulates the login procedure to continue.
17553 * Input:
17554 * ha = adapter state pointer.
17555 * Returns:
17557 * Context:
17558 * Kernel context.
17560 static int
17561 ql_n_port_plogi(ql_adapter_state_t *ha)
17563 int rval;
17564 ql_tgt_t *tq;
17565 ql_head_t done_q = { NULL, NULL };
17567 rval = QL_SUCCESS;
17569 if (ha->topology & QL_N_PORT) {
17570 /* if we're doing this the n_port_handle must be good */
17571 if (LOCAL_LOOP_ID(ha->n_port->n_port_handle)) {
17572 tq = ql_loop_id_to_queue(ha,
17573 ha->n_port->n_port_handle);
17574 if (tq != NULL) {
17575 (void) ql_send_plogi(ha, tq, &done_q);
17576 } else {
17577 EL(ha, "n_port_handle = %x, tq = %x\n",
17578 ha->n_port->n_port_handle, tq);
17580 } else {
17581 EL(ha, "n_port_handle = %x, tq = %x\n",
17582 ha->n_port->n_port_handle, tq);
17584 if (done_q.first != NULL) {
17585 ql_done(done_q.first);
17588 return (rval);
17592 * Compare two WWNs. The NAA is omitted for comparison.
17594 * Note particularly that the indentation used in this
17595 * function isn't according to Sun recommendations. It
17596 * is indented to make reading a bit easy.
17598 * Return Values:
17599 * if first == second return 0
17600 * if first > second return 1
17601 * if first < second return -1
17604 ql_wwn_cmp(ql_adapter_state_t *ha, la_wwn_t *first, la_wwn_t *second)
17606 la_wwn_t t1, t2;
17607 int rval;
17609 EL(ha, "WWPN=%08x%08x\n",
17610 BE_32(first->i_wwn[0]), BE_32(first->i_wwn[1]));
17611 EL(ha, "WWPN=%08x%08x\n",
17612 BE_32(second->i_wwn[0]), BE_32(second->i_wwn[1]));
17614 * Fibre Channel protocol is big endian, so compare
17615 * as big endian values
17617 t1.i_wwn[0] = BE_32(first->i_wwn[0]);
17618 t1.i_wwn[1] = BE_32(first->i_wwn[1]);
17620 t2.i_wwn[0] = BE_32(second->i_wwn[0]);
17621 t2.i_wwn[1] = BE_32(second->i_wwn[1]);
17623 if (t1.i_wwn[0] == t2.i_wwn[0]) {
17624 if (t1.i_wwn[1] == t2.i_wwn[1]) {
17625 rval = 0;
17626 } else if (t1.i_wwn[1] > t2.i_wwn[1]) {
17627 rval = 1;
17628 } else {
17629 rval = -1;
17631 } else {
17632 if (t1.i_wwn[0] > t2.i_wwn[0]) {
17633 rval = 1;
17634 } else {
17635 rval = -1;
17638 return (rval);
17642 * ql_wait_for_td_stop
17643 * Wait for task daemon to stop running. Internal command timeout
17644 * is approximately 30 seconds, so it may help in some corner
17645 * cases to wait that long
17647 * Input:
17648 * ha = adapter state pointer.
17650 * Returns:
17651 * DDI_SUCCESS or DDI_FAILURE.
17653 * Context:
17654 * Kernel context.
17657 static int
17658 ql_wait_for_td_stop(ql_adapter_state_t *ha)
17660 int rval = DDI_FAILURE;
17661 UINT16 wait_cnt;
17663 for (wait_cnt = 0; wait_cnt < 3000; wait_cnt++) {
17664 /* The task daemon clears the stop flag on exit. */
17665 if (ha->task_daemon_flags & TASK_DAEMON_STOP_FLG) {
17666 if (ha->cprinfo.cc_events & CALLB_CPR_START ||
17667 ddi_in_panic()) {
17668 drv_usecwait(10000);
17669 } else {
17670 delay(drv_usectohz(10000));
17672 } else {
17673 rval = DDI_SUCCESS;
17674 break;
17677 return (rval);
17681 * ql_nvram_cache_desc_ctor - Construct an nvram cache descriptor.
17683 * Input: Pointer to the adapter state structure.
17684 * Returns: Success or Failure.
17685 * Context: Kernel context.
17688 ql_nvram_cache_desc_ctor(ql_adapter_state_t *ha)
17690 int rval = DDI_SUCCESS;
17692 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17694 ha->nvram_cache =
17695 (nvram_cache_desc_t *)kmem_zalloc(sizeof (nvram_cache_desc_t),
17696 KM_SLEEP);
17698 if (ha->nvram_cache == NULL) {
17699 cmn_err(CE_WARN, "%s(%d): can't construct nvram cache"
17700 " descriptor", QL_NAME, ha->instance);
17701 rval = DDI_FAILURE;
17702 } else {
17703 if (CFG_IST(ha, CFG_CTRL_24258081)) {
17704 ha->nvram_cache->size = sizeof (nvram_24xx_t);
17705 } else {
17706 ha->nvram_cache->size = sizeof (nvram_t);
17708 ha->nvram_cache->cache =
17709 (void *)kmem_zalloc(ha->nvram_cache->size, KM_SLEEP);
17710 if (ha->nvram_cache->cache == NULL) {
17711 cmn_err(CE_WARN, "%s(%d): can't get nvram cache buffer",
17712 QL_NAME, ha->instance);
17713 kmem_free(ha->nvram_cache,
17714 sizeof (nvram_cache_desc_t));
17715 ha->nvram_cache = 0;
17716 rval = DDI_FAILURE;
17717 } else {
17718 mutex_init(&ha->nvram_cache->mutex, NULL,
17719 MUTEX_DRIVER, NULL);
17720 ha->nvram_cache->valid = 0;
17724 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17726 return (rval);
17730 * ql_nvram_cache_desc_dtor - Destroy an nvram cache descriptor.
17732 * Input: Pointer to the adapter state structure.
17733 * Returns: Success or Failure.
17734 * Context: Kernel context.
17737 ql_nvram_cache_desc_dtor(ql_adapter_state_t *ha)
17739 int rval = DDI_SUCCESS;
17741 QL_PRINT_3(CE_CONT, "(%d): started\n", ha->instance);
17743 if (ha->nvram_cache == NULL) {
17744 cmn_err(CE_WARN, "%s(%d): can't destroy nvram descriptor",
17745 QL_NAME, ha->instance);
17746 rval = DDI_FAILURE;
17747 } else {
17748 if (ha->nvram_cache->cache != NULL) {
17749 kmem_free(ha->nvram_cache->cache,
17750 ha->nvram_cache->size);
17752 mutex_destroy(&ha->nvram_cache->mutex);
17753 kmem_free(ha->nvram_cache, sizeof (nvram_cache_desc_t));
17756 QL_PRINT_3(CE_CONT, "(%d): done\n", ha->instance);
17758 return (rval);
/*
 * ql_process_idc_event - Handle an Inter-Driver Communication async event.
 *
 * Input:	Pointer to the adapter state structure.
 * Returns:	void
 * Context:	Kernel context.
 */
17768 static void
17769 ql_process_idc_event(ql_adapter_state_t *ha)
17771 int rval;
17773 switch (ha->idc_mb[0]) {
17774 case MBA_IDC_NOTIFICATION:
17776 * The informational opcode (idc_mb[2]) can be a
17777 * defined value or the mailbox command being executed
17778 * on another function which stimulated this IDC message.
17780 ADAPTER_STATE_LOCK(ha);
17781 switch (ha->idc_mb[2]) {
17782 case IDC_OPC_DRV_START:
17783 if (ha->idc_flash_acc != 0) {
17784 ha->idc_flash_acc--;
17785 if (ha->idc_flash_acc == 0) {
17786 ha->idc_flash_acc_timer = 0;
17787 GLOBAL_HW_UNLOCK();
17790 if (ha->idc_restart_cnt != 0) {
17791 ha->idc_restart_cnt--;
17792 if (ha->idc_restart_cnt == 0) {
17793 ha->idc_restart_timer = 0;
17794 ADAPTER_STATE_UNLOCK(ha);
17795 TASK_DAEMON_LOCK(ha);
17796 ha->task_daemon_flags &= ~DRIVER_STALL;
17797 TASK_DAEMON_UNLOCK(ha);
17798 ql_restart_queues(ha);
17799 } else {
17800 ADAPTER_STATE_UNLOCK(ha);
17802 } else {
17803 ADAPTER_STATE_UNLOCK(ha);
17805 break;
17806 case IDC_OPC_FLASH_ACC:
17807 ha->idc_flash_acc_timer = 30;
17808 if (ha->idc_flash_acc == 0) {
17809 GLOBAL_HW_LOCK();
17811 ha->idc_flash_acc++;
17812 ADAPTER_STATE_UNLOCK(ha);
17813 break;
17814 case IDC_OPC_RESTART_MPI:
17815 ha->idc_restart_timer = 30;
17816 ha->idc_restart_cnt++;
17817 ADAPTER_STATE_UNLOCK(ha);
17818 TASK_DAEMON_LOCK(ha);
17819 ha->task_daemon_flags |= DRIVER_STALL;
17820 TASK_DAEMON_UNLOCK(ha);
17821 break;
17822 case IDC_OPC_PORT_RESET_MBC:
17823 case IDC_OPC_SET_PORT_CONFIG_MBC:
17824 ha->idc_restart_timer = 30;
17825 ha->idc_restart_cnt++;
17826 ADAPTER_STATE_UNLOCK(ha);
17827 TASK_DAEMON_LOCK(ha);
17828 ha->task_daemon_flags |= DRIVER_STALL;
17829 TASK_DAEMON_UNLOCK(ha);
17830 (void) ql_wait_outstanding(ha);
17831 break;
17832 default:
17833 ADAPTER_STATE_UNLOCK(ha);
17834 EL(ha, "Unknown IDC opcode=%xh %xh\n", ha->idc_mb[0],
17835 ha->idc_mb[2]);
17836 break;
17839 * If there is a timeout value associated with this IDC
17840 * notification then there is an implied requirement
17841 * that we return an ACK.
17843 if (ha->idc_mb[1] & IDC_TIMEOUT_MASK) {
17844 rval = ql_idc_ack(ha);
17845 if (rval != QL_SUCCESS) {
17846 EL(ha, "idc_ack status=%xh %xh\n", rval,
17847 ha->idc_mb[2]);
17850 break;
17851 case MBA_IDC_COMPLETE:
17853 * We don't ACK completions, only these require action.
17855 switch (ha->idc_mb[2]) {
17856 case IDC_OPC_PORT_RESET_MBC:
17857 case IDC_OPC_SET_PORT_CONFIG_MBC:
17858 ADAPTER_STATE_LOCK(ha);
17859 if (ha->idc_restart_cnt != 0) {
17860 ha->idc_restart_cnt--;
17861 if (ha->idc_restart_cnt == 0) {
17862 ha->idc_restart_timer = 0;
17863 ADAPTER_STATE_UNLOCK(ha);
17864 TASK_DAEMON_LOCK(ha);
17865 ha->task_daemon_flags &= ~DRIVER_STALL;
17866 TASK_DAEMON_UNLOCK(ha);
17867 ql_restart_queues(ha);
17868 } else {
17869 ADAPTER_STATE_UNLOCK(ha);
17871 } else {
17872 ADAPTER_STATE_UNLOCK(ha);
17874 break;
17875 default:
17876 break; /* Don't care... */
17878 break;
17879 case MBA_IDC_TIME_EXTENDED:
17880 QL_PRINT_10(CE_CONT, "(%d): MBA_IDC_TIME_EXTENDED="
17881 "%xh\n", ha->instance, ha->idc_mb[2]);
17882 break;
17883 default:
17884 EL(ha, "Inconsistent IDC event =%xh %xh\n", ha->idc_mb[0],
17885 ha->idc_mb[2]);
17886 ADAPTER_STATE_UNLOCK(ha);
17887 break;