isci: remove mmio wrappers
drivers/scsi/isci/core/scic_sds_controller.c  [linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git]
blob 774c4b3b0dfb191c3b2634e7003b861e8981d0c2
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
5 * GPL LICENSE SUMMARY
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * BSD LICENSE
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #include <linux/device.h>
57 #include "scic_controller.h"
58 #include "scic_phy.h"
59 #include "scic_port.h"
60 #include "scic_remote_device.h"
61 #include "scic_sds_controller.h"
62 #include "scu_registers.h"
63 #include "scic_sds_phy.h"
64 #include "scic_sds_port_configuration_agent.h"
65 #include "scic_sds_port.h"
66 #include "scic_sds_remote_device.h"
67 #include "scic_sds_request.h"
68 #include "sci_environment.h"
69 #include "sci_util.h"
70 #include "scu_completion_codes.h"
71 #include "scu_constants.h"
72 #include "scu_event_codes.h"
73 #include "scu_remote_node_context.h"
74 #include "scu_task_context.h"
75 #include "scu_unsolicited_frame.h"
77 #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200
79 /**
80 * smu_dcc_get_max_ports() -
82 * This macro returns the maximum number of logical ports supported by the
83 * hardware. The caller passes in the value read from the device context
84 * capacity register and this macro will mask and shift the value appropriately.
86 #define smu_dcc_get_max_ports(dcc_value) \
88 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \
89 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \
92 /**
93 * smu_dcc_get_max_task_context() -
95 * This macro returns the maximum number of task contexts supported by the
96 * hardware. The caller passes in the value read from the device context
97 * capacity register and this macro will mask and shift the value appropriately.
99 #define smu_dcc_get_max_task_context(dcc_value) \
101 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \
102 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \
106 * smu_dcc_get_max_remote_node_context() -
108 * This macro returns the maximum number of remote node contexts supported by
109 * the hardware. The caller passes in the value read from the device context
110 * capacity register and this macro will mask and shift the value appropriately.
112 #define smu_dcc_get_max_remote_node_context(dcc_value) \
114 (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \
115 >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \
119 static void scic_sds_controller_power_control_timer_handler(
120 void *controller);
121 #define SCIC_SDS_CONTROLLER_MIN_TIMER_COUNT 3
122 #define SCIC_SDS_CONTROLLER_MAX_TIMER_COUNT 3
127 * The number of milliseconds to wait for a phy to start.
129 #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100
134 * The number of milliseconds to wait while a given phy is consuming power
135 * before allowing another set of phys to consume power. Ultimately, this will
136 * be specified by OEM parameter.
138 #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500
141 * COMPLETION_QUEUE_CYCLE_BIT() -
143 * This macro will return the cycle bit of the completion queue entry
145 #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000)
148 * NORMALIZE_GET_POINTER() -
150 * This macro will normalize the completion queue get pointer so its value can
151 * be used as an index into an array
153 #define NORMALIZE_GET_POINTER(x) \
154 ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK)
157 * NORMALIZE_PUT_POINTER() -
159 * This macro will normalize the completion queue put pointer so its value can
160 * be used as an array index
162 #define NORMALIZE_PUT_POINTER(x) \
163 ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK)
167 * NORMALIZE_GET_POINTER_CYCLE_BIT() -
169 * This macro will normalize the completion queue cycle pointer so it matches
170 * the completion queue cycle bit
172 #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \
173 ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT))
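/* Completion queue entries carry their cycle bit in bit 31, while the get
 * pointer register keeps it at SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT;
 * shifting it up to bit 31 allows a direct comparison between the two. */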
176 * NORMALIZE_EVENT_POINTER() -
178 * This macro will normalize the completion queue event entry so its value can
179 * be used as an index.
181 #define NORMALIZE_EVENT_POINTER(x) \
183 ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \
184 >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \
188 * INCREMENT_COMPLETION_QUEUE_GET() -
190 * This macro will increment the controller's completion queue index value and
191 * possibly toggle the cycle bit if the completion queue index wraps back to 0.
193 #define INCREMENT_COMPLETION_QUEUE_GET(controller, index, cycle) \
194 INCREMENT_QUEUE_GET(\
195 (index), \
196 (cycle), \
197 (controller)->completion_queue_entries, \
198 SMU_CQGR_CYCLE_BIT \
202 * INCREMENT_EVENT_QUEUE_GET() -
204 * This macro will increment the controller's event queue index value and
205 * possibly toggle the event cycle bit if the event queue index wraps back to 0.
207 #define INCREMENT_EVENT_QUEUE_GET(controller, index, cycle) \
208 INCREMENT_QUEUE_GET(\
209 (index), \
210 (cycle), \
211 (controller)->completion_event_entries, \
212 SMU_CQGR_EVENT_CYCLE_BIT \
215 struct sci_base_memory_descriptor_list *
216 sci_controller_get_memory_descriptor_list_handle(struct scic_sds_controller *scic)
218 return &scic->parent.mdl;
221 static void scic_sds_controller_initialize_power_control(struct scic_sds_controller *scic)
223 struct isci_host *ihost = sci_object_get_association(scic);
224 scic->power_control.timer = isci_timer_create(ihost,
225 scic,
226 scic_sds_controller_power_control_timer_handler);
228 memset(scic->power_control.requesters, 0,
229 sizeof(scic->power_control.requesters));
231 scic->power_control.phys_waiting = 0;
232 scic->power_control.phys_granted_power = 0;
235 #define SCU_REMOTE_NODE_CONTEXT_ALIGNMENT (32)
236 #define SCU_TASK_CONTEXT_ALIGNMENT (256)
237 #define SCU_UNSOLICITED_FRAME_ADDRESS_ALIGNMENT (64)
238 #define SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT (1024)
239 #define SCU_UNSOLICITED_FRAME_HEADER_ALIGNMENT (64)
242 * This method builds the memory descriptor table for this controller.
243 * @this_controller: This parameter specifies the controller object for which
244 * to build the memory table.
247 static void scic_sds_controller_build_memory_descriptor_table(
248 struct scic_sds_controller *this_controller)
250 sci_base_mde_construct(
251 &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE],
252 SCU_COMPLETION_RAM_ALIGNMENT,
253 (sizeof(u32) * this_controller->completion_queue_entries),
254 (SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS)
257 sci_base_mde_construct(
258 &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT],
259 SCU_REMOTE_NODE_CONTEXT_ALIGNMENT,
260 this_controller->remote_node_entries * sizeof(union scu_remote_node_context),
261 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
264 sci_base_mde_construct(
265 &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT],
266 SCU_TASK_CONTEXT_ALIGNMENT,
267 this_controller->task_context_entries * sizeof(struct scu_task_context),
268 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
272 * The UF buffer address table size must be programmed to a power
273 * of 2. Find the first power of 2 that is equal to or greater than
274 * the number of unsolicited frame buffers to be utilized. */
275 scic_sds_unsolicited_frame_control_set_address_table_count(
276 &this_controller->uf_control
279 sci_base_mde_construct(
280 &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER],
281 SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT,
282 scic_sds_unsolicited_frame_control_get_mde_size(this_controller->uf_control),
283 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
288 * This method validates the driver supplied memory descriptor table.
289 * @this_controller:
291 * enum sci_status
293 static enum sci_status scic_sds_controller_validate_memory_descriptor_table(
294 struct scic_sds_controller *this_controller)
296 bool mde_list_valid;
298 mde_list_valid = sci_base_mde_is_valid(
299 &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE],
300 SCU_COMPLETION_RAM_ALIGNMENT,
301 (sizeof(u32) * this_controller->completion_queue_entries),
302 (SCI_MDE_ATTRIBUTE_CACHEABLE | SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS)
305 if (mde_list_valid == false)
306 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
308 mde_list_valid = sci_base_mde_is_valid(
309 &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT],
310 SCU_REMOTE_NODE_CONTEXT_ALIGNMENT,
311 this_controller->remote_node_entries * sizeof(union scu_remote_node_context),
312 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
315 if (mde_list_valid == false)
316 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
318 mde_list_valid = sci_base_mde_is_valid(
319 &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT],
320 SCU_TASK_CONTEXT_ALIGNMENT,
321 this_controller->task_context_entries * sizeof(struct scu_task_context),
322 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
325 if (mde_list_valid == false)
326 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
328 mde_list_valid = sci_base_mde_is_valid(
329 &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER],
330 SCU_UNSOLICITED_FRAME_BUFFER_ALIGNMENT,
331 scic_sds_unsolicited_frame_control_get_mde_size(this_controller->uf_control),
332 SCI_MDE_ATTRIBUTE_PHYSICALLY_CONTIGUOUS
335 if (mde_list_valid == false)
336 return SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD;
338 return SCI_SUCCESS;
342 * This method initializes the controller with the physical memory addresses
343 * that are used to communicate with the driver.
344 * @this_controller:
347 static void scic_sds_controller_ram_initialization(
348 struct scic_sds_controller *this_controller)
350 struct sci_physical_memory_descriptor *mde;
353 * The completion queue is actually placed in cacheable memory;
354 * therefore it no longer comes out of memory in the MDL. */
355 mde = &this_controller->memory_descriptors[SCU_MDE_COMPLETION_QUEUE];
356 this_controller->completion_queue = (u32 *)mde->virtual_address;
357 writel(lower_32_bits(mde->physical_address), \
358 &this_controller->smu_registers->completion_queue_lower);
359 writel(upper_32_bits(mde->physical_address),
360 &this_controller->smu_registers->completion_queue_upper);
363 * Program the location of the Remote Node Context table
364 * into the SCU. */
365 mde = &this_controller->memory_descriptors[SCU_MDE_REMOTE_NODE_CONTEXT];
366 this_controller->remote_node_context_table = (union scu_remote_node_context *)
367 mde->virtual_address;
368 writel(lower_32_bits(mde->physical_address),
369 &this_controller->smu_registers->remote_node_context_lower);
370 writel(upper_32_bits(mde->physical_address),
371 &this_controller->smu_registers->remote_node_context_upper);
373 /* Program the location of the Task Context table into the SCU. */
374 mde = &this_controller->memory_descriptors[SCU_MDE_TASK_CONTEXT];
375 this_controller->task_context_table = (struct scu_task_context *)
376 mde->virtual_address;
377 writel(lower_32_bits(mde->physical_address),
378 &this_controller->smu_registers->host_task_table_lower);
379 writel(upper_32_bits(mde->physical_address),
380 &this_controller->smu_registers->host_task_table_upper);
382 mde = &this_controller->memory_descriptors[SCU_MDE_UF_BUFFER];
383 scic_sds_unsolicited_frame_control_construct(
384 &this_controller->uf_control, mde, this_controller
388 * Inform the silicon as to the location of the UF headers and
389 * address table.
391 writel(lower_32_bits(this_controller->uf_control.headers.physical_address),
392 &this_controller->scu_registers->sdma.uf_header_base_address_lower);
393 writel(upper_32_bits(this_controller->uf_control.headers.physical_address),
394 &this_controller->scu_registers->sdma.uf_header_base_address_upper);
396 writel(lower_32_bits(this_controller->uf_control.address_table.physical_address),
397 &this_controller->scu_registers->sdma.uf_address_table_lower);
398 writel(upper_32_bits(this_controller->uf_control.address_table.physical_address),
399 &this_controller->scu_registers->sdma.uf_address_table_upper);
403 * This method initializes the task context data for the controller.
404 * @this_controller:
407 static void
408 scic_sds_controller_assign_task_entries(struct scic_sds_controller *controller)
410 u32 task_assignment;
413 * Assign all the TCs to function 0
414 * TODO: Do we actually need to read this register to write it back?
417 task_assignment =
418 readl(&controller->smu_registers->task_context_assignment[0]);
420 task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
421 (SMU_TCA_GEN_VAL(ENDING, controller->task_context_entries - 1)) |
422 (SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));
424 writel(task_assignment,
425 &controller->smu_registers->task_context_assignment[0]);
430 * This method initializes the hardware completion queue.
434 static void scic_sds_controller_initialize_completion_queue(
435 struct scic_sds_controller *this_controller)
437 u32 index;
438 u32 completion_queue_control_value;
439 u32 completion_queue_get_value;
440 u32 completion_queue_put_value;
442 this_controller->completion_queue_get = 0;
444 completion_queue_control_value = (
445 SMU_CQC_QUEUE_LIMIT_SET(this_controller->completion_queue_entries - 1)
446 | SMU_CQC_EVENT_LIMIT_SET(this_controller->completion_event_entries - 1)
449 writel(completion_queue_control_value,
450 &this_controller->smu_registers->completion_queue_control);
453 /* Set the completion queue get pointer and enable the queue */
454 completion_queue_get_value = (
455 (SMU_CQGR_GEN_VAL(POINTER, 0))
456 | (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
457 | (SMU_CQGR_GEN_BIT(ENABLE))
458 | (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
461 writel(completion_queue_get_value,
462 &this_controller->smu_registers->completion_queue_get);
464 /* Set the completion queue put pointer */
465 completion_queue_put_value = (
466 (SMU_CQPR_GEN_VAL(POINTER, 0))
467 | (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
470 writel(completion_queue_put_value,
471 &this_controller->smu_registers->completion_queue_put);
474 /* Initialize the cycle bit of the completion queue entries */
475 for (index = 0; index < this_controller->completion_queue_entries; index++) {
477 * If get.cycle_bit != completion_queue.cycle_bit
478 * it is not a valid completion queue entry
479 * so at system start all entries are invalid */
480 this_controller->completion_queue[index] = 0x80000000;
485 * This method initializes the hardware unsolicited frame queue.
489 static void scic_sds_controller_initialize_unsolicited_frame_queue(
490 struct scic_sds_controller *this_controller)
492 u32 frame_queue_control_value;
493 u32 frame_queue_get_value;
494 u32 frame_queue_put_value;
496 /* Write the queue size */
497 frame_queue_control_value =
498 SCU_UFQC_GEN_VAL(QUEUE_SIZE, this_controller->uf_control.address_table.count);
500 writel(frame_queue_control_value,
501 &this_controller->scu_registers->sdma.unsolicited_frame_queue_control);
503 /* Setup the get pointer for the unsolicited frame queue */
504 frame_queue_get_value = (
505 SCU_UFQGP_GEN_VAL(POINTER, 0)
506 | SCU_UFQGP_GEN_BIT(ENABLE_BIT)
509 writel(frame_queue_get_value,
510 &this_controller->scu_registers->sdma.unsolicited_frame_get_pointer);
511 /* Setup the put pointer for the unsolicited frame queue */
512 frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
513 writel(frame_queue_put_value,
514 &this_controller->scu_registers->sdma.unsolicited_frame_put_pointer);
518 * This method enables the hardware port task scheduler.
522 static void scic_sds_controller_enable_port_task_scheduler(
523 struct scic_sds_controller *this_controller)
525 u32 port_task_scheduler_value;
527 port_task_scheduler_value =
528 readl(&this_controller->scu_registers->peg0.ptsg.control);
529 port_task_scheduler_value |=
530 (SCU_PTSGCR_GEN_BIT(ETM_ENABLE) | SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
531 writel(port_task_scheduler_value,
532 &this_controller->scu_registers->peg0.ptsg.control);
538 * This macro is used to delay between writes to the AFE registers during AFE
539 * initialization.
541 #define AFE_REGISTER_WRITE_DELAY 10
543 /* Initialize the AFE for this phy index. We need to read the AFE setup from
544 * the OEM parameters.
546 static void scic_sds_controller_afe_initialization(struct scic_sds_controller *scic)
548 const struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
549 u32 afe_status;
550 u32 phy_id;
552 /* Clear DFX Status registers */
553 writel(0x0081000f, &scic->scu_registers->afe.afe_dfx_master_control0);
554 udelay(AFE_REGISTER_WRITE_DELAY);
556 /* Configure bias currents to normal */
557 if (is_a0())
558 writel(0x00005500, &scic->scu_registers->afe.afe_bias_control);
559 else
560 writel(0x00005A00, &scic->scu_registers->afe.afe_bias_control);
562 udelay(AFE_REGISTER_WRITE_DELAY);
564 /* Enable PLL */
565 if (is_b0())
566 writel(0x80040A08, &scic->scu_registers->afe.afe_pll_control0);
567 else
568 writel(0x80040908, &scic->scu_registers->afe.afe_pll_control0);
570 udelay(AFE_REGISTER_WRITE_DELAY);
572 /* Wait for the PLL to lock */
573 do {
574 afe_status = readl(&scic->scu_registers->afe.afe_common_block_status);
575 udelay(AFE_REGISTER_WRITE_DELAY);
576 } while ((afe_status & 0x00001000) == 0);
578 if (is_b0()) {
579 /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
580 writel(0x7bcc96ad, &scic->scu_registers->afe.afe_pmsn_master_control0);
581 udelay(AFE_REGISTER_WRITE_DELAY);
584 for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
585 const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
587 if (is_b0()) {
588 /* Configure transmitter SSC parameters */
589 writel(0x00030000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
590 udelay(AFE_REGISTER_WRITE_DELAY);
591 } else {
593 * All defaults, except the Receive Word Alignment/Comma Detect
594 * Enable....(0xe800) */
595 writel(0x00004512, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
596 udelay(AFE_REGISTER_WRITE_DELAY);
598 writel(0x0050100F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
599 udelay(AFE_REGISTER_WRITE_DELAY);
603 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
604 * & increase TX int & ext bias 20%....(0xe85c) */
605 if (is_a0())
606 writel(0x000003D4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
607 else if (is_a2())
608 writel(0x000003F0, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
609 else {
610 /* Power down TX and RX (PWRDNTX and PWRDNRX) */
611 writel(0x000003d7, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
612 udelay(AFE_REGISTER_WRITE_DELAY);
615 * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
616 * & increase TX int & ext bias 20%....(0xe85c) */
617 writel(0x000003d4, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
619 udelay(AFE_REGISTER_WRITE_DELAY);
621 if (is_a0() || is_a2()) {
622 /* Enable TX equalization (0xe824) */
623 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
624 udelay(AFE_REGISTER_WRITE_DELAY);
628 * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
629 * RDD=0x0(RX Detect Enabled) ....(0xe800) */
630 writel(0x00004100, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
631 udelay(AFE_REGISTER_WRITE_DELAY);
633 /* Leave DFE/FFE on */
634 if (is_a0())
635 writel(0x3F09983F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
636 else if (is_a2())
637 writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
638 else {
639 writel(0x3F11103F, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
640 udelay(AFE_REGISTER_WRITE_DELAY);
641 /* Enable TX equalization (0xe824) */
642 writel(0x00040000, &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
644 udelay(AFE_REGISTER_WRITE_DELAY);
646 writel(oem_phy->afe_tx_amp_control0,
647 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
648 udelay(AFE_REGISTER_WRITE_DELAY);
650 writel(oem_phy->afe_tx_amp_control1,
651 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
652 udelay(AFE_REGISTER_WRITE_DELAY);
654 writel(oem_phy->afe_tx_amp_control2,
655 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
656 udelay(AFE_REGISTER_WRITE_DELAY);
658 writel(oem_phy->afe_tx_amp_control3,
659 &scic->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
660 udelay(AFE_REGISTER_WRITE_DELAY);
663 /* Transfer control to the PEs */
664 writel(0x00010f00, &scic->scu_registers->afe.afe_dfx_master_control0);
665 udelay(AFE_REGISTER_WRITE_DELAY);
669 * ****************************************************************************-
670 * * SCIC SDS Controller Internal Start/Stop Routines
671 * ****************************************************************************- */
675 * This method will attempt to transition into the ready state for the
676 * controller and indicate that the controller start operation has completed
677 * if all criteria are met.
678 * @this_controller: This parameter indicates the controller object for which
679 * to transition to ready.
680 * @status: This parameter indicates the status value to be passed into the call
681 * to scic_cb_controller_start_complete().
683 * none.
685 static void scic_sds_controller_transition_to_ready(
686 struct scic_sds_controller *scic,
687 enum sci_status status)
689 struct isci_host *ihost = sci_object_get_association(scic);
691 if (scic->parent.state_machine.current_state_id ==
692 SCI_BASE_CONTROLLER_STATE_STARTING) {
694 * We move into the ready state, because some of the phys/ports
695 * may be up and operational.
697 sci_base_state_machine_change_state(&scic->parent.state_machine,
698 SCI_BASE_CONTROLLER_STATE_READY);
700 isci_host_start_complete(ihost, status);
704 static void scic_sds_controller_timeout_handler(void *_scic)
706 struct scic_sds_controller *scic = _scic;
707 struct isci_host *ihost = sci_object_get_association(scic);
708 struct sci_base_state_machine *sm = &scic->parent.state_machine;
710 if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STARTING)
711 scic_sds_controller_transition_to_ready(scic, SCI_FAILURE_TIMEOUT);
712 else if (sm->current_state_id == SCI_BASE_CONTROLLER_STATE_STOPPING) {
713 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_FAILED);
714 isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
715 } else /* / @todo Now what do we want to do in this case? */
716 dev_err(scic_to_dev(scic),
717 "%s: Controller timer fired when controller was not "
718 "in a state being timed.\n",
719 __func__);
722 static enum sci_status scic_sds_controller_stop_ports(struct scic_sds_controller *scic)
724 u32 index;
725 enum sci_status port_status;
726 enum sci_status status = SCI_SUCCESS;
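/* Ask each configured logical port to stop; note any failure but keep
 * trying to stop the remaining ports. */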
728 for (index = 0; index < scic->logical_port_entries; index++) {
729 struct scic_sds_port *sci_port = &scic->port_table[index];
730 sci_base_port_handler_t stop;
732 stop = sci_port->state_handlers->parent.stop_handler;
733 port_status = stop(&sci_port->parent);
735 if ((port_status != SCI_SUCCESS) &&
736 (port_status != SCI_FAILURE_INVALID_STATE)) {
737 status = SCI_FAILURE;
739 dev_warn(scic_to_dev(scic),
740 "%s: Controller stop operation failed to "
741 "stop port %d because of status %d.\n",
742 __func__,
743 sci_port->logical_port_index,
744 port_status);
748 return status;
751 static inline void scic_sds_controller_phy_timer_start(
752 struct scic_sds_controller *scic)
754 isci_timer_start(scic->phy_startup_timer,
755 SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
757 scic->phy_startup_timer_pending = true;
760 static void scic_sds_controller_phy_timer_stop(struct scic_sds_controller *scic)
762 isci_timer_stop(scic->phy_startup_timer);
764 scic->phy_startup_timer_pending = false;
768 * scic_sds_controller_start_next_phy - start phy
769 * @scic: controller
771 * If all the phys have been started, then attempt to transition the
772 * controller to the READY state and inform the user
773 * (scic_cb_controller_start_complete()).
775 static enum sci_status scic_sds_controller_start_next_phy(struct scic_sds_controller *scic)
777 struct scic_sds_oem_params *oem = &scic->oem_parameters.sds1;
778 struct scic_sds_phy *sci_phy;
779 enum sci_status status;
781 status = SCI_SUCCESS;
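/* Phys are started one at a time; a pending startup timer means a phy
 * start is already in progress, so do nothing yet. */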
783 if (scic->phy_startup_timer_pending)
784 return status;
786 if (scic->next_phy_to_start >= SCI_MAX_PHYS) {
787 bool is_controller_start_complete = true;
788 u32 state;
789 u8 index;
791 for (index = 0; index < SCI_MAX_PHYS; index++) {
792 sci_phy = &scic->phy_table[index];
793 state = sci_phy->parent.state_machine.current_state_id;
795 if (!scic_sds_phy_get_port(sci_phy))
796 continue;
798 /* The controller start operation is complete iff:
799 * - all links have been given an opportunity to start
800 * - have no indication of a connected device
801 * - have an indication of a connected device and it has
802 * finished the link training process.
804 if ((sci_phy->is_in_link_training == false &&
805 state == SCI_BASE_PHY_STATE_INITIAL) ||
806 (sci_phy->is_in_link_training == false &&
807 state == SCI_BASE_PHY_STATE_STOPPED) ||
808 (sci_phy->is_in_link_training == true &&
809 state == SCI_BASE_PHY_STATE_STARTING)) {
810 is_controller_start_complete = false;
811 break;
816 * The controller has successfully finished the start process.
817 * Inform the SCI Core user and transition to the READY state. */
818 if (is_controller_start_complete == true) {
819 scic_sds_controller_transition_to_ready(scic, SCI_SUCCESS);
820 scic_sds_controller_phy_timer_stop(scic);
822 } else {
823 sci_phy = &scic->phy_table[scic->next_phy_to_start];
825 if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
826 if (scic_sds_phy_get_port(sci_phy) == NULL) {
827 scic->next_phy_to_start++;
829 /* Caution: recursion ahead; be forewarned.
831 * The PHY was never added to a PORT in MPC mode,
832 * so start the next phy in sequence. This phy
833 * will never go link up and will not draw power;
834 * the OEM parameters either configured the phy
835 * incorrectly for the PORT or it was never
836 * assigned to a PORT.
838 return scic_sds_controller_start_next_phy(scic);
842 status = scic_sds_phy_start(sci_phy);
844 if (status == SCI_SUCCESS) {
845 scic_sds_controller_phy_timer_start(scic);
846 } else {
847 dev_warn(scic_to_dev(scic),
848 "%s: Controller stop operation failed "
849 "to stop phy %d because of status "
850 "%d.\n",
851 __func__,
852 scic->phy_table[scic->next_phy_to_start].phy_index,
853 status);
856 scic->next_phy_to_start++;
859 return status;
862 static void scic_sds_controller_phy_startup_timeout_handler(void *_scic)
864 struct scic_sds_controller *scic = _scic;
865 enum sci_status status;
867 scic->phy_startup_timer_pending = false;
868 status = SCI_FAILURE;
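/* The phy being started did not come up in time; keep issuing phy starts
 * until one succeeds (or all phys have been given a chance). */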
869 while (status != SCI_SUCCESS)
870 status = scic_sds_controller_start_next_phy(scic);
873 static enum sci_status scic_sds_controller_initialize_phy_startup(struct scic_sds_controller *scic)
875 struct isci_host *ihost = sci_object_get_association(scic);
877 scic->phy_startup_timer = isci_timer_create(ihost,
878 scic,
879 scic_sds_controller_phy_startup_timeout_handler);
881 if (scic->phy_startup_timer == NULL)
882 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
883 else {
884 scic->next_phy_to_start = 0;
885 scic->phy_startup_timer_pending = false;
888 return SCI_SUCCESS;
891 static enum sci_status scic_sds_controller_stop_phys(struct scic_sds_controller *scic)
893 u32 index;
894 enum sci_status status;
895 enum sci_status phy_status;
897 status = SCI_SUCCESS;
899 for (index = 0; index < SCI_MAX_PHYS; index++) {
900 phy_status = scic_sds_phy_stop(&scic->phy_table[index]);
902 if (
903 (phy_status != SCI_SUCCESS)
904 && (phy_status != SCI_FAILURE_INVALID_STATE)
906 status = SCI_FAILURE;
908 dev_warn(scic_to_dev(scic),
909 "%s: Controller stop operation failed to stop "
910 "phy %d because of status %d.\n",
911 __func__,
912 scic->phy_table[index].phy_index, phy_status);
916 return status;
919 static enum sci_status scic_sds_controller_stop_devices(struct scic_sds_controller *scic)
921 u32 index;
922 enum sci_status status;
923 enum sci_status device_status;
925 status = SCI_SUCCESS;
927 for (index = 0; index < scic->remote_node_entries; index++) {
928 if (scic->device_table[index] != NULL) {
929 /* / @todo What timeout value do we want to provide to this request? */
930 device_status = scic_remote_device_stop(scic->device_table[index], 0);
932 if ((device_status != SCI_SUCCESS) &&
933 (device_status != SCI_FAILURE_INVALID_STATE)) {
934 dev_warn(scic_to_dev(scic),
935 "%s: Controller stop operation failed "
936 "to stop device 0x%p because of "
937 "status %d.\n",
938 __func__,
939 scic->device_table[index], device_status);
944 return status;
947 static void scic_sds_controller_power_control_timer_start(struct scic_sds_controller *scic)
949 isci_timer_start(scic->power_control.timer,
950 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
952 scic->power_control.timer_started = true;
955 static void scic_sds_controller_power_control_timer_stop(struct scic_sds_controller *scic)
957 if (scic->power_control.timer_started) {
958 isci_timer_stop(scic->power_control.timer);
959 scic->power_control.timer_started = false;
963 static void scic_sds_controller_power_control_timer_restart(struct scic_sds_controller *scic)
965 scic_sds_controller_power_control_timer_stop(scic);
966 scic_sds_controller_power_control_timer_start(scic);
969 static void scic_sds_controller_power_control_timer_handler(
970 void *controller)
972 struct scic_sds_controller *this_controller;
974 this_controller = (struct scic_sds_controller *)controller;
976 this_controller->power_control.phys_granted_power = 0;
978 if (this_controller->power_control.phys_waiting == 0) {
979 this_controller->power_control.timer_started = false;
980 } else {
981 struct scic_sds_phy *the_phy = NULL;
982 u8 i;
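/* Grant spin-up power to waiting phys, up to the OEM-specified
 * max_concurrent_dev_spin_up limit. */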
984 for (i = 0;
985 (i < SCI_MAX_PHYS)
986 && (this_controller->power_control.phys_waiting != 0);
987 i++) {
988 if (this_controller->power_control.requesters[i] != NULL) {
989 if (this_controller->power_control.phys_granted_power <
990 this_controller->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
991 the_phy = this_controller->power_control.requesters[i];
992 this_controller->power_control.requesters[i] = NULL;
993 this_controller->power_control.phys_waiting--;
994 this_controller->power_control.phys_granted_power++;
995 scic_sds_phy_consume_power_handler(the_phy);
996 } else {
997 break;
1003 * It doesn't matter if the power list is empty, we need to start the
1004 * timer in case another phy becomes ready.
1006 scic_sds_controller_power_control_timer_start(this_controller);
1011 * This method inserts the phy in the stagger spinup control queue.
1012 * @this_controller:
1016 void scic_sds_controller_power_control_queue_insert(
1017 struct scic_sds_controller *this_controller,
1018 struct scic_sds_phy *the_phy)
1020 BUG_ON(the_phy == NULL);
1022 if (this_controller->power_control.phys_granted_power <
1023 this_controller->oem_parameters.sds1.controller.max_concurrent_dev_spin_up) {
1024 this_controller->power_control.phys_granted_power++;
1025 scic_sds_phy_consume_power_handler(the_phy);
1028 * stop and start the power_control timer. When the timer fires, the
1029 * no_of_phys_granted_power will be set to 0
1031 scic_sds_controller_power_control_timer_restart(this_controller);
1032 } else {
1033 /* Add the phy in the waiting list */
1034 this_controller->power_control.requesters[the_phy->phy_index] = the_phy;
1035 this_controller->power_control.phys_waiting++;
1040 * This method removes the phy from the stagger spinup control queue.
1041 * @this_controller:
1045 void scic_sds_controller_power_control_queue_remove(
1046 struct scic_sds_controller *this_controller,
1047 struct scic_sds_phy *the_phy)
1049 BUG_ON(the_phy == NULL);
1051 if (this_controller->power_control.requesters[the_phy->phy_index] != NULL) {
1052 this_controller->power_control.phys_waiting--;
1055 this_controller->power_control.requesters[the_phy->phy_index] = NULL;
1059 * ****************************************************************************-
1060 * * SCIC SDS Controller Completion Routines
1061 * ****************************************************************************- */
1064 * This method returns a true value if the completion queue has entries that
1065 * can be processed
1066 * @this_controller:
1068 * bool true if the completion queue has entries to process false if the
1069 * completion queue has no entries to process
1071 static bool scic_sds_controller_completion_queue_has_entries(
1072 struct scic_sds_controller *this_controller)
1074 u32 get_value = this_controller->completion_queue_get;
1075 u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK;
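/* An entry is valid when its hardware-written cycle bit matches the cycle
 * bit tracked in the software get pointer. */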
1077 if (
1078 NORMALIZE_GET_POINTER_CYCLE_BIT(get_value)
1079 == COMPLETION_QUEUE_CYCLE_BIT(this_controller->completion_queue[get_index])
1081 return true;
1084 return false;
1088 * This method processes a task completion notification. This is called from
1089 * within the controller completion handler.
1090 * @this_controller:
1091 * @completion_entry:
1094 static void scic_sds_controller_task_completion(
1095 struct scic_sds_controller *this_controller,
1096 u32 completion_entry)
1098 u32 index;
1099 struct scic_sds_request *io_request;
1101 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1102 io_request = this_controller->io_request_table[index];
1104 /* Make sure that we really want to process this IO request */
1105 if (
1106 (io_request != NULL)
1107 && (io_request->io_tag != SCI_CONTROLLER_INVALID_IO_TAG)
1108 && (
1109 scic_sds_io_tag_get_sequence(io_request->io_tag)
1110 == this_controller->io_request_sequence[index]
1113 /* Yep, this is a valid io request; pass it along to the io request handler */
1114 scic_sds_io_request_tc_completion(io_request, completion_entry);
1119 * This method processes an SDMA completion event. This is called from within
1120 * the controller completion handler.
1121 * @this_controller:
1122 * @completion_entry:
1125 static void scic_sds_controller_sdma_completion(
1126 struct scic_sds_controller *this_controller,
1127 u32 completion_entry)
1129 u32 index;
1130 struct scic_sds_request *io_request;
1131 struct scic_sds_remote_device *device;
1133 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1135 switch (scu_get_command_request_type(completion_entry)) {
1136 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC:
1137 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC:
1138 io_request = this_controller->io_request_table[index];
1139 dev_warn(scic_to_dev(this_controller),
1140 "%s: SCIC SDS Completion type SDMA %x for io request "
1141 "%p\n",
1142 __func__,
1143 completion_entry,
1144 io_request);
1145 /* @todo For a post TC operation we need to fail the IO
1146 * request
1148 break;
1150 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC:
1151 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC:
1152 case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC:
1153 device = this_controller->device_table[index];
1154 dev_warn(scic_to_dev(this_controller),
1155 "%s: SCIC SDS Completion type SDMA %x for remote "
1156 "device %p\n",
1157 __func__,
1158 completion_entry,
1159 device);
1160 /* @todo For a port RNC operation we need to fail the
1161 * device
1163 break;
1165 default:
1166 dev_warn(scic_to_dev(this_controller),
1167 "%s: SCIC SDS Completion unknown SDMA completion "
1168 "type %x\n",
1169 __func__,
1170 completion_entry);
1171 break;
1178 * @this_controller:
1179 * @completion_entry:
1181 * This method processes an unsolicited frame message. This is called from
1182 * within the controller completion handler. none
1184 static void scic_sds_controller_unsolicited_frame(
1185 struct scic_sds_controller *this_controller,
1186 u32 completion_entry)
1188 u32 index;
1189 u32 frame_index;
1191 struct scu_unsolicited_frame_header *frame_header;
1192 struct scic_sds_phy *phy;
1193 struct scic_sds_remote_device *device;
1195 enum sci_status result = SCI_FAILURE;
1197 frame_index = SCU_GET_FRAME_INDEX(completion_entry);
1199 frame_header
1200 = this_controller->uf_control.buffers.array[frame_index].header;
1201 this_controller->uf_control.buffers.array[frame_index].state
1202 = UNSOLICITED_FRAME_IN_USE;
1204 if (SCU_GET_FRAME_ERROR(completion_entry)) {
1206 * / @todo If the IAF frame or SIGNATURE FIS frame has an error will
1207 * / this cause a problem? We expect the phy initialization will
1208 * / fail if there is an error in the frame. */
1209 scic_sds_controller_release_frame(this_controller, frame_index);
1210 return;
1213 if (frame_header->is_address_frame) {
1214 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1215 phy = &this_controller->phy_table[index];
1216 if (phy != NULL) {
1217 result = scic_sds_phy_frame_handler(phy, frame_index);
1219 } else {
1221 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1223 if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1225 * This is a signature fis or a frame from a direct attached SATA
1226 * device that has not yet been created. In either case forward
1227 * the frame to the PE and let it take care of the frame data. */
1228 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1229 phy = &this_controller->phy_table[index];
1230 result = scic_sds_phy_frame_handler(phy, frame_index);
1231 } else {
1232 if (index < this_controller->remote_node_entries)
1233 device = this_controller->device_table[index];
1234 else
1235 device = NULL;
1237 if (device != NULL)
1238 result = scic_sds_remote_device_frame_handler(device, frame_index);
1239 else
1240 scic_sds_controller_release_frame(this_controller, frame_index);
1244 if (result != SCI_SUCCESS) {
1246 * / @todo Is there any reason to report some additional error message
1247 * / when we get this failure notification? */
1252 * This method processes an event completion entry. This is called from within
1253 * the controller completion handler.
1254 * @this_controller:
1255 * @completion_entry:
1258 static void scic_sds_controller_event_completion(
1259 struct scic_sds_controller *this_controller,
1260 u32 completion_entry)
1262 u32 index;
1263 struct scic_sds_request *io_request;
1264 struct scic_sds_remote_device *device;
1265 struct scic_sds_phy *phy;
1267 index = SCU_GET_COMPLETION_INDEX(completion_entry);
1269 switch (scu_get_event_type(completion_entry)) {
1270 case SCU_EVENT_TYPE_SMU_COMMAND_ERROR:
1271 /* / @todo The driver did something wrong and we need to fix the condition. */
1272 dev_err(scic_to_dev(this_controller),
1273 "%s: SCIC Controller 0x%p received SMU command error "
1274 "0x%x\n",
1275 __func__,
1276 this_controller,
1277 completion_entry);
1278 break;
1280 case SCU_EVENT_TYPE_SMU_PCQ_ERROR:
1281 case SCU_EVENT_TYPE_SMU_ERROR:
1282 case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR:
1284 * / @todo This is a hardware failure and it's likely that we want to
1285 * / reset the controller. */
1286 dev_err(scic_to_dev(this_controller),
1287 "%s: SCIC Controller 0x%p received fatal controller "
1288 "event 0x%x\n",
1289 __func__,
1290 this_controller,
1291 completion_entry);
1292 break;
1294 case SCU_EVENT_TYPE_TRANSPORT_ERROR:
1295 io_request = this_controller->io_request_table[index];
1296 scic_sds_io_request_event_handler(io_request, completion_entry);
1297 break;
1299 case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT:
1300 switch (scu_get_event_specifier(completion_entry)) {
1301 case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE:
1302 case SCU_EVENT_SPECIFIC_TASK_TIMEOUT:
1303 io_request = this_controller->io_request_table[index];
1304 if (io_request != NULL)
1305 scic_sds_io_request_event_handler(io_request, completion_entry);
1306 else
1307 dev_warn(scic_to_dev(this_controller),
1308 "%s: SCIC Controller 0x%p received "
1309 "event 0x%x for io request object "
1310 "that doesnt exist.\n",
1311 __func__,
1312 this_controller,
1313 completion_entry);
1315 break;
1317 case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT:
1318 device = this_controller->device_table[index];
1319 if (device != NULL)
1320 scic_sds_remote_device_event_handler(device, completion_entry);
1321 else
1322 dev_warn(scic_to_dev(this_controller),
1323 "%s: SCIC Controller 0x%p received "
1324 "event 0x%x for remote device object "
1325 "that doesnt exist.\n",
1326 __func__,
1327 this_controller,
1328 completion_entry);
1330 break;
1332 break;
1334 case SCU_EVENT_TYPE_BROADCAST_CHANGE:
1336 * direct the broadcast change event to the phy first and then let
1337 * the phy redirect the broadcast change to the port object */
1338 case SCU_EVENT_TYPE_ERR_CNT_EVENT:
1340 * direct error counter event to the phy object since that is where
1341 * we get the event notification. This is a type 4 event. */
1342 case SCU_EVENT_TYPE_OSSP_EVENT:
1343 index = SCU_GET_PROTOCOL_ENGINE_INDEX(completion_entry);
1344 phy = &this_controller->phy_table[index];
1345 scic_sds_phy_event_handler(phy, completion_entry);
1346 break;
1348 case SCU_EVENT_TYPE_RNC_SUSPEND_TX:
1349 case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX:
1350 case SCU_EVENT_TYPE_RNC_OPS_MISC:
1351 if (index < this_controller->remote_node_entries) {
1352 device = this_controller->device_table[index];
1354 if (device != NULL)
1355 scic_sds_remote_device_event_handler(device, completion_entry);
1356 } else
1357 dev_err(scic_to_dev(this_controller),
1358 "%s: SCIC Controller 0x%p received event 0x%x "
1359 "for remote device object 0x%0x that doesnt "
1360 "exist.\n",
1361 __func__,
1362 this_controller,
1363 completion_entry,
1364 index);
1366 break;
1368 default:
1369 dev_warn(scic_to_dev(this_controller),
1370 "%s: SCIC Controller received unknown event code %x\n",
1371 __func__,
1372 completion_entry);
1373 break;
1378 * This method is a private routine for processing the completion queue entries.
1379 * @this_controller:
1382 static void scic_sds_controller_process_completions(
1383 struct scic_sds_controller *this_controller)
1385 u32 completion_count = 0;
1386 u32 completion_entry;
1387 u32 get_index;
1388 u32 get_cycle;
1389 u32 event_index;
1390 u32 event_cycle;
1392 dev_dbg(scic_to_dev(this_controller),
1393 "%s: completion queue begining get:0x%08x\n",
1394 __func__,
1395 this_controller->completion_queue_get);
1397 /* Get the component parts of the completion queue */
1398 get_index = NORMALIZE_GET_POINTER(this_controller->completion_queue_get);
1399 get_cycle = SMU_CQGR_CYCLE_BIT & this_controller->completion_queue_get;
1401 event_index = NORMALIZE_EVENT_POINTER(this_controller->completion_queue_get);
1402 event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & this_controller->completion_queue_get;
1404 while (
1405 NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle)
1406 == COMPLETION_QUEUE_CYCLE_BIT(this_controller->completion_queue[get_index])
1408 completion_count++;
1410 completion_entry = this_controller->completion_queue[get_index];
1411 INCREMENT_COMPLETION_QUEUE_GET(this_controller, get_index, get_cycle);
1413 dev_dbg(scic_to_dev(this_controller),
1414 "%s: completion queue entry:0x%08x\n",
1415 __func__,
1416 completion_entry);
1418 switch (SCU_GET_COMPLETION_TYPE(completion_entry)) {
1419 case SCU_COMPLETION_TYPE_TASK:
1420 scic_sds_controller_task_completion(this_controller, completion_entry);
1421 break;
1423 case SCU_COMPLETION_TYPE_SDMA:
1424 scic_sds_controller_sdma_completion(this_controller, completion_entry);
1425 break;
1427 case SCU_COMPLETION_TYPE_UFI:
1428 scic_sds_controller_unsolicited_frame(this_controller, completion_entry);
1429 break;
1431 case SCU_COMPLETION_TYPE_EVENT:
1432 INCREMENT_EVENT_QUEUE_GET(this_controller, event_index, event_cycle);
1433 scic_sds_controller_event_completion(this_controller, completion_entry);
1434 break;
1436 case SCU_COMPLETION_TYPE_NOTIFY:
1438 * Presently we do the same thing with a notify event that we do with the
1439 * other event codes. */
1440 INCREMENT_EVENT_QUEUE_GET(this_controller, event_index, event_cycle);
1441 scic_sds_controller_event_completion(this_controller, completion_entry);
1442 break;
1444 default:
1445 dev_warn(scic_to_dev(this_controller),
1446 "%s: SCIC Controller received unknown "
1447 "completion type %x\n",
1448 __func__,
1449 completion_entry);
1450 break;
1454 /* Update the get register if we completed one or more entries */
1455 if (completion_count > 0) {
1456 this_controller->completion_queue_get =
1457 SMU_CQGR_GEN_BIT(ENABLE)
1458 | SMU_CQGR_GEN_BIT(EVENT_ENABLE)
1459 | event_cycle | SMU_CQGR_GEN_VAL(EVENT_POINTER, event_index)
1460 | get_cycle | SMU_CQGR_GEN_VAL(POINTER, get_index);
1462 writel(this_controller->completion_queue_get,
1463 &this_controller->smu_registers->completion_queue_get);
1467 dev_dbg(scic_to_dev(this_controller),
1468 "%s: completion queue ending get:0x%08x\n",
1469 __func__,
1470 this_controller->completion_queue_get);
1474 bool scic_sds_controller_isr(struct scic_sds_controller *scic)
1476 if (scic_sds_controller_completion_queue_has_entries(scic)) {
1477 return true;
1478 } else {
1480 * we have a spurious interrupt; it could be that we have already
1481 * emptied the completion queue from a previous interrupt */
1482 writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
1485 * There is a race in the hardware that could cause us not to be notified
1486 * of an interrupt completion if we do not take this step. We will mask
1487 * then unmask the interrupts so that, if another interrupt was pending when
1488 * the interrupt source was cleared, we get the next interrupt message. */
1489 writel(0xFF000000, &scic->smu_registers->interrupt_mask);
1490 writel(0, &scic->smu_registers->interrupt_mask);
1493 return false;
1496 void scic_sds_controller_completion_handler(struct scic_sds_controller *scic)
1498 /* Empty out the completion queue */
1499 if (scic_sds_controller_completion_queue_has_entries(scic))
1500 scic_sds_controller_process_completions(scic);
1502 /* Clear the interrupt and enable all interrupts again */
1503 writel(SMU_ISR_COMPLETION, &scic->smu_registers->interrupt_status);
1504 /* Could we write the value of SMU_ISR_COMPLETION? */
1505 writel(0xFF000000, &scic->smu_registers->interrupt_mask);
1506 writel(0, &scic->smu_registers->interrupt_mask);
1509 bool scic_sds_controller_error_isr(struct scic_sds_controller *scic)
1511 u32 interrupt_status;
1513 interrupt_status =
1514 readl(&scic->smu_registers->interrupt_status);
1515 interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND);
1517 if (interrupt_status != 0) {
1519 * There is an error interrupt pending so let it through and handle
1520 * in the callback */
1521 return true;
1525 * There is a race in the hardware that could cause us not to be notified
1526 * of an interrupt completion if we do not take this step. We will mask
1527 * then unmask the error interrupts so if there was another interrupt
1528 * pending we will be notified.
1529 * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */
1530 writel(0xff, &scic->smu_registers->interrupt_mask);
1531 writel(0, &scic->smu_registers->interrupt_mask);
1533 return false;
1536 void scic_sds_controller_error_handler(struct scic_sds_controller *scic)
1538 u32 interrupt_status;
1540 interrupt_status =
1541 readl(&scic->smu_registers->interrupt_status);
1543 if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) &&
1544 scic_sds_controller_completion_queue_has_entries(scic)) {
1546 scic_sds_controller_process_completions(scic);
1547 writel(SMU_ISR_QUEUE_SUSPEND, &scic->smu_registers->interrupt_status);
1548 } else {
1549 dev_err(scic_to_dev(scic), "%s: status: %#x\n", __func__,
1550 interrupt_status);
1552 sci_base_state_machine_change_state(&scic->parent.state_machine,
1553 SCI_BASE_CONTROLLER_STATE_FAILED);
1555 return;
1558 /* If we don't process any completions I am not sure that we want to do this.
1559 * We are in the middle of a hardware fault and should probably be reset.
1561 writel(0, &scic->smu_registers->interrupt_mask);
1567 void scic_sds_controller_link_up(
1568 struct scic_sds_controller *scic,
1569 struct scic_sds_port *sci_port,
1570 struct scic_sds_phy *sci_phy)
1572 scic_sds_controller_phy_handler_t link_up;
1573 u32 state;
1575 state = scic->parent.state_machine.current_state_id;
1576 link_up = scic_sds_controller_state_handler_table[state].link_up;
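/* Dispatch through the per-state handler table so the link up event is
 * only acted upon in controller states that expect it. */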
1578 if (link_up)
1579 link_up(scic, sci_port, sci_phy);
1580 else
1581 dev_dbg(scic_to_dev(scic),
1582 "%s: SCIC Controller linkup event from phy %d in "
1583 "unexpected state %d\n", __func__, sci_phy->phy_index,
1584 state);
1588 void scic_sds_controller_link_down(
1589 struct scic_sds_controller *scic,
1590 struct scic_sds_port *sci_port,
1591 struct scic_sds_phy *sci_phy)
1593 u32 state;
1594 scic_sds_controller_phy_handler_t link_down;
1596 state = scic->parent.state_machine.current_state_id;
1597 link_down = scic_sds_controller_state_handler_table[state].link_down;
1599 if (link_down)
1600 link_down(scic, sci_port, sci_phy);
1601 else
1602 dev_dbg(scic_to_dev(scic),
1603 "%s: SCIC Controller linkdown event from phy %d in "
1604 "unexpected state %d\n",
1605 __func__,
1606 sci_phy->phy_index, state);
1610 * This is a helper method to determine if any remote devices on this
1611 * controller are still in the stopping state.
1614 static bool scic_sds_controller_has_remote_devices_stopping(
1615 struct scic_sds_controller *this_controller)
1617 u32 index;
1619 for (index = 0; index < this_controller->remote_node_entries; index++) {
1620 if ((this_controller->device_table[index] != NULL) &&
1621 (this_controller->device_table[index]->parent.state_machine.current_state_id
1622 == SCI_BASE_REMOTE_DEVICE_STATE_STOPPING))
1623 return true;
1626 return false;
1630 * This method is called by the remote device to inform the controller
1631 * object that the remote device has stopped.
1635 void scic_sds_controller_remote_device_stopped(struct scic_sds_controller *scic,
1636 struct scic_sds_remote_device *sci_dev)
1639 u32 state;
1640 scic_sds_controller_device_handler_t stopped;
1642 state = scic->parent.state_machine.current_state_id;
1643 stopped = scic_sds_controller_state_handler_table[state].device_stopped;
1645 if (stopped)
1646 stopped(scic, sci_dev);
1647 else {
1648 dev_dbg(scic_to_dev(scic),
1649 "%s: SCIC Controller 0x%p remote device stopped event "
1650 "from device 0x%p in unexpected state %d\n",
1651 __func__, scic, sci_dev, state);
1658 * This method will write to the SCU PCP register the request value. The method
1659 * is used to suspend/resume ports, devices, and phys.
1660 * @this_controller:
1664 void scic_sds_controller_post_request(
1665 struct scic_sds_controller *this_controller,
1666 u32 request)
1668 dev_dbg(scic_to_dev(this_controller),
1669 "%s: SCIC Controller 0x%p post request 0x%08x\n",
1670 __func__,
1671 this_controller,
1672 request);
1674 writel(request, &this_controller->smu_registers->post_context_port);
1678 * This method will copy the soft copy of the task context into the physical
1679 * memory accessible by the controller.
1680 * @this_controller: This parameter specifies the controller for which to copy
1681 * the task context.
1682 * @this_request: This parameter specifies the request for which the task
1683 * context is being copied.
1685 * After this call is made the SCIC_SDS_IO_REQUEST object will always point to
1686 * the physical memory version of the task context. Thus, all subsequent
1687 * updates to the task context are performed in the TC table (i.e. DMAable
1688 * memory). none
1690 void scic_sds_controller_copy_task_context(
1691 struct scic_sds_controller *this_controller,
1692 struct scic_sds_request *this_request)
1694 struct scu_task_context *task_context_buffer;
1696 task_context_buffer = scic_sds_controller_get_task_context_buffer(
1697 this_controller, this_request->io_tag
1700 memcpy(
1701 task_context_buffer,
1702 this_request->task_context_buffer,
1703 SCI_FIELD_OFFSET(struct scu_task_context, sgl_snapshot_ac)
1707 * The soft copy of the TC has now been copied into the TC
1708 * table accessible by the silicon. Thus, any further changes to
1709 * the TC (e.g. TC termination) occur in the appropriate location. */
1710 this_request->task_context_buffer = task_context_buffer;
1714 * This method returns the task context buffer for the given io tag.
1715 * @this_controller:
1716 * @io_tag:
1718 * struct scu_task_context*
1720 struct scu_task_context *scic_sds_controller_get_task_context_buffer(
1721 struct scic_sds_controller *this_controller,
1722 u16 io_tag
1724 u16 task_index = scic_sds_io_tag_get_index(io_tag);
1726 if (task_index < this_controller->task_context_entries) {
1727 return &this_controller->task_context_table[task_index];
1730 return NULL;
1734 * This method returns the sequence value from the io tag value
1735 * @this_controller:
1736 * @io_tag:
1738 * u16
1742 * This method returns the IO request associated with the tag value
1743 * @this_controller:
1744 * @io_tag:
1746 * SCIC_SDS_IO_REQUEST_T* NULL if there is no valid IO request at the tag value
1748 struct scic_sds_request *scic_sds_controller_get_io_request_from_tag(
1749 struct scic_sds_controller *this_controller,
1750 u16 io_tag
1752 u16 task_index;
1753 u16 task_sequence;
1755 task_index = scic_sds_io_tag_get_index(io_tag);
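/* The io_tag encodes both a task context index and a sequence number;
 * the sequence check below rejects stale tags that have been reused. */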
1757 if (task_index < this_controller->task_context_entries) {
1758 if (this_controller->io_request_table[task_index] != NULL) {
1759 task_sequence = scic_sds_io_tag_get_sequence(io_tag);
1761 if (task_sequence == this_controller->io_request_sequence[task_index]) {
1762 return this_controller->io_request_table[task_index];
1767 return NULL;
1771 * This method allocates a remote node index and reserves the remote node
1772 * context space for use. This method can fail if there are no more remote
1773 * node indexes available.
1774 * @this_controller: This is the controller object which contains the set of
1775 * free remote node ids
1776 * @the_device: This is the device object which is requesting a remote node
1777 * id
1778 * @node_id: This is the remote node id that is assigned to the device if one
1779 * is available
1781 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there are no remote
1782 * node indexes available.
1784 enum sci_status scic_sds_controller_allocate_remote_node_context(
1785 struct scic_sds_controller *this_controller,
1786 struct scic_sds_remote_device *the_device,
1787 u16 *node_id)
1789 u16 node_index;
1790 u32 remote_node_count = scic_sds_remote_device_node_count(the_device);
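/* A device may need more than one contiguous remote node context
 * (remote_node_count), so allocation is done by count rather than singly. */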
1792 node_index = scic_sds_remote_node_table_allocate_remote_node(
1793 &this_controller->available_remote_nodes, remote_node_count
1796 if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
1797 this_controller->device_table[node_index] = the_device;
1799 *node_id = node_index;
1801 return SCI_SUCCESS;
1804 return SCI_FAILURE_INSUFFICIENT_RESOURCES;
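/*
 * Illustrative sketch (hypothetical caller): a remote node index obtained
 * here must eventually be handed back for the same device via
 * scic_sds_controller_free_remote_node_context().  The locals scic and
 * sci_dev are assumptions.
 *
 *	u16 node_id;
 *
 *	if (scic_sds_controller_allocate_remote_node_context(
 *			scic, sci_dev, &node_id) == SCI_SUCCESS) {
 *		...
 *		scic_sds_controller_free_remote_node_context(scic, sci_dev, node_id);
 *	}
 */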
1808 * This method frees the remote node index back to the available pool. Once
1809 * this is done, the remote node context buffer is no longer valid and
1810 * cannot be used.
1811 * @this_controller:
1812 * @the_device:
1813 * @node_id:
1816 void scic_sds_controller_free_remote_node_context(
1817 struct scic_sds_controller *this_controller,
1818 struct scic_sds_remote_device *the_device,
1819 u16 node_id)
1821 u32 remote_node_count = scic_sds_remote_device_node_count(the_device);
1823 if (this_controller->device_table[node_id] == the_device) {
1824 this_controller->device_table[node_id] = NULL;
1826 scic_sds_remote_node_table_release_remote_node_index(
1827 &this_controller->available_remote_nodes, remote_node_count, node_id
1833 * This method returns the union scu_remote_node_context for the specified remote
1834 * node id.
1835 * @this_controller:
1836 * @node_id:
1838 * union scu_remote_node_context*
1840 union scu_remote_node_context *scic_sds_controller_get_remote_node_context_buffer(
1841 struct scic_sds_controller *this_controller,
1842 u16 node_id
1844 if (
1845 (node_id < this_controller->remote_node_entries)
1846 && (this_controller->device_table[node_id] != NULL)
1848 return &this_controller->remote_node_context_table[node_id];
1851 return NULL;
1856 * @response_buffer: This is the buffer into which the D2H register FIS will be
1857 * constructed.
1858 * @frame_header: This is the frame header returned by the hardware.
1859 * @frame_buffer: This is the frame buffer returned by the hardware.
1861 * This method will combine the frame header and frame buffer to create a SATA
1862 * D2H register FIS.
1864 void scic_sds_controller_copy_sata_response(
1865 void *response_buffer,
1866 void *frame_header,
1867 void *frame_buffer)
1869 memcpy(
1870 response_buffer,
1871 frame_header,
1872 sizeof(u32)
1875 memcpy(
1876 (char *)((char *)response_buffer + sizeof(u32)),
1877 frame_buffer,
1878 sizeof(struct sata_fis_reg_d2h) - sizeof(u32)
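/*
 * Illustrative sketch (hypothetical caller): the destination buffer must hold
 * a complete D2H register FIS; the first dword is taken from the frame header
 * and the remainder from the frame buffer.  frame_header and frame_buffer are
 * assumed to come from the unsolicited frame control.
 *
 *	struct sata_fis_reg_d2h d2h_fis;
 *
 *	scic_sds_controller_copy_sata_response(&d2h_fis, frame_header,
 *					       frame_buffer);
 */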
1883 * This method releases the frame. Once this is done, the frame is available
1884 * for re-use by the hardware. The data contained in the frame header and frame
1885 * buffer is no longer valid. The UF queue get pointer is only updated if UF
1886 * control indicates this is appropriate.
1887 * @this_controller:
1888 * @frame_index:
1891 void scic_sds_controller_release_frame(
1892 struct scic_sds_controller *this_controller,
1893 u32 frame_index)
1895 if (scic_sds_unsolicited_frame_control_release_frame(
1896 &this_controller->uf_control, frame_index) == true)
1897 writel(this_controller->uf_control.get,
1898 &this_controller->scu_registers->sdma.unsolicited_frame_get_pointer);
1902 * This method sets user parameters and OEM parameters to default values.
1903 * Users can override these values utilizing the scic_user_parameters_set()
1904 * and scic_oem_parameters_set() methods.
1905 * @scic: This parameter specifies the controller for which to set the
1906 * configuration parameters to their default values.
1909 static void scic_sds_controller_set_default_config_parameters(struct scic_sds_controller *scic)
1911 struct isci_host *ihost = sci_object_get_association(scic);
1912 u16 index;
1914 /* Default to APC mode. */
1915 scic->oem_parameters.sds1.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
1917 /* Default to 1 concurrent device spin up. */
1918 scic->oem_parameters.sds1.controller.max_concurrent_dev_spin_up = 1;
1920 /* Default to no SSC operation. */
1921 scic->oem_parameters.sds1.controller.do_enable_ssc = false;
1923 /* Initialize all of the port parameter information to narrow ports. */
1924 for (index = 0; index < SCI_MAX_PORTS; index++) {
1925 scic->oem_parameters.sds1.ports[index].phy_mask = 0;
1928 /* Initialize all of the phy parameter information. */
1929 for (index = 0; index < SCI_MAX_PHYS; index++) {
1930 /* Default to 6G (i.e. Gen 3) for now. */
1931 scic->user_parameters.sds1.phys[index].max_speed_generation = 3;
1933 /* the frequencies cannot be 0 */
1934 scic->user_parameters.sds1.phys[index].align_insertion_frequency = 0x7f;
1935 scic->user_parameters.sds1.phys[index].in_connection_align_insertion_frequency = 0xff;
1936 scic->user_parameters.sds1.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;
1939 * Previous Vitesse based expanders had an arbitration issue that
1940 * is worked around by having the upper 32-bits of SAS address
1941 * with a value greater than the Vitesse company identifier.
1942 * Hence, usage of 0x5FCFFFFF. */
1943 scic->oem_parameters.sds1.phys[index].sas_address.low = 0x1 + ihost->id;
1944 scic->oem_parameters.sds1.phys[index].sas_address.high = 0x5FCFFFFF;
1947 scic->user_parameters.sds1.stp_inactivity_timeout = 5;
1948 scic->user_parameters.sds1.ssp_inactivity_timeout = 5;
1949 scic->user_parameters.sds1.stp_max_occupancy_timeout = 5;
1950 scic->user_parameters.sds1.ssp_max_occupancy_timeout = 20;
1951 scic->user_parameters.sds1.no_outbound_task_timeout = 20;
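/*
 * Illustrative sketch (hypothetical caller): overriding one of the defaults
 * above before controller initialization.  scic_parms is assumed to already
 * hold the current user parameter settings.
 *
 *	scic_parms.sds1.phys[0].max_speed_generation = 2;
 *	if (scic_user_parameters_set(scic, &scic_parms) != SCI_SUCCESS)
 *		dev_warn(scic_to_dev(scic), "user parameter override rejected\n");
 */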
1955 * scic_controller_initialize() - This method will initialize the controller
1956 * hardware managed by the supplied core controller object. This method
1957 * will bring the physical controller hardware out of reset and enable the
1958 * core to determine the capabilities of the hardware being managed. Thus,
1959 * the core controller can determine its exact physical (DMA capable)
1960 * memory requirements.
1961 * @controller: This parameter specifies the controller to be initialized.
1963 * The SCI Core user must have called scic_controller_construct() on the
1964 * supplied controller object previously. Indicate if the controller was
1965 * successfully initialized or if it failed in some way. SCI_SUCCESS This value
1966 * is returned if the controller hardware was successfully initialized.
1968 enum sci_status scic_controller_initialize(
1969 struct scic_sds_controller *scic)
1971 enum sci_status status = SCI_FAILURE_INVALID_STATE;
1972 sci_base_controller_handler_t initialize;
1973 u32 state;
1975 state = scic->parent.state_machine.current_state_id;
1976 initialize = scic_sds_controller_state_handler_table[state].base.initialize;
1978 if (initialize)
1979 status = initialize(&scic->parent);
1980 else
1981 dev_warn(scic_to_dev(scic),
1982 "%s: SCIC Controller initialize operation requested "
1983 "in invalid state %d\n", __func__, state);
1985 return status;
1989 * scic_controller_get_suggested_start_timeout() - This method returns the
1990 * suggested scic_controller_start() timeout amount. The user is free to
1991 * use any timeout value, but this method provides the suggested minimum
1992 * start timeout value. The returned value is based upon empirical
1993 * information determined as a result of interoperability testing.
1994 * @controller: the handle to the controller object for which to return the
1995 * suggested start timeout.
1997 * This method returns the number of milliseconds for the suggested start
1998 * operation timeout.
2000 u32 scic_controller_get_suggested_start_timeout(
2001 struct scic_sds_controller *sc)
2003 /* Validate the user supplied parameters. */
2004 if (sc == NULL)
2005 return 0;
2008 * The suggested minimum timeout value for a controller start operation:
2010 * Signature FIS Timeout
2011 * + Phy Start Timeout
2012 * + Number of Phy Spin Up Intervals
2013 * ---------------------------------
2014 * Number of milliseconds for the controller start operation.
2016 * NOTE: The number of phy spin up intervals will be equivalent
2017 * to the number of phys divided by the number of phys allowed
2018 * per interval - 1 (once OEM parameters are supported).
2019 * Currently we assume only 1 phy per interval. */
2021 return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
2022 + SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
2023 + ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
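/*
 * Illustrative sketch (hypothetical caller): feeding the suggested timeout
 * straight into scic_controller_start().
 *
 *	u32 timeout = scic_controller_get_suggested_start_timeout(scic);
 *	enum sci_status status = scic_controller_start(scic, timeout);
 */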
2027 * scic_controller_start() - This method will start the supplied core
2028 * controller. This method will start the staggered spin up operation. The
2029 * SCI User completion callback is called when the following conditions are
2030 * met: -# the return status of this method is SCI_SUCCESS. -# after all of
2031 * the phys have successfully started or been given the opportunity to start.
2032 * @controller: the handle to the controller object to start.
2033 * @timeout: This parameter specifies the number of milliseconds in which the
2034 * start operation should complete.
2036 * The SCI Core user must have filled in the physical memory descriptor
2037 * structure via the sci_controller_get_memory_descriptor_list() method. The
2038 * SCI Core user must have invoked the scic_controller_initialize() method
2039 * prior to invoking this method. The controller must be in the INITIALIZED or
2040 * STARTED state. Indicate if the controller start method succeeded or failed
2041 * in some way. SCI_SUCCESS if the start operation succeeded.
2042 * SCI_WARNING_ALREADY_IN_STATE if the controller is already in the STARTED
2043 * state. SCI_FAILURE_INVALID_STATE if the controller is not either in the
2044 * INITIALIZED or STARTED states. SCI_FAILURE_INVALID_MEMORY_DESCRIPTOR if
2045 * there are inconsistent or invalid values in the supplied
2046 * struct sci_physical_memory_descriptor array.
2048 enum sci_status scic_controller_start(
2049 struct scic_sds_controller *scic,
2050 u32 timeout)
2052 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2053 sci_base_controller_timed_handler_t start;
2054 u32 state;
2056 state = scic->parent.state_machine.current_state_id;
2057 start = scic_sds_controller_state_handler_table[state].base.start;
2059 if (start)
2060 status = start(&scic->parent, timeout);
2061 else
2062 dev_warn(scic_to_dev(scic),
2063 "%s: SCIC Controller start operation requested in "
2064 "invalid state %d\n", __func__, state);
2066 return status;
2070 * scic_controller_stop() - This method will stop an individual controller
2071 * object.This method will invoke the associated user callback upon
2072 * completion. The completion callback is called when the following
2073 * conditions are met: -# the method return status is SCI_SUCCESS. -# the
2074 * controller has been quiesced. This method will ensure that all IO
2075 * requests are quiesced, phys are stopped, and all additional operation by
2076 * the hardware is halted.
2077 * @controller: the handle to the controller object to stop.
2078 * @timeout: This parameter specifies the number of milliseconds in which the
2079 * stop operation should complete.
2081 * The controller must be in the STARTED or STOPPED state. Indicate if the
2082 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
2083 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
2084 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
2085 * controller is not either in the STARTED or STOPPED states.
2087 enum sci_status scic_controller_stop(
2088 struct scic_sds_controller *scic,
2089 u32 timeout)
2091 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2092 sci_base_controller_timed_handler_t stop;
2093 u32 state;
2095 state = scic->parent.state_machine.current_state_id;
2096 stop = scic_sds_controller_state_handler_table[state].base.stop;
2098 if (stop)
2099 status = stop(&scic->parent, timeout);
2100 else
2101 dev_warn(scic_to_dev(scic),
2102 "%s: SCIC Controller stop operation requested in "
2103 "invalid state %d\n", __func__, state);
2105 return status;
2109 * scic_controller_reset() - This method will reset the supplied core
2110 * controller regardless of the state of said controller. This operation is
2111 * considered destructive. In other words, all current operations are wiped
2112 * out. No IO completions for outstanding devices occur. Outstanding IO
2113 * requests are not aborted or completed at the actual remote device.
2114 * @controller: the handle to the controller object to reset.
2116 * Indicate if the controller reset method succeeded or failed in some way.
2117 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
2118 * the controller reset operation is unable to complete.
2120 enum sci_status scic_controller_reset(
2121 struct scic_sds_controller *scic)
2123 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2124 sci_base_controller_handler_t reset;
2125 u32 state;
2127 state = scic->parent.state_machine.current_state_id;
2128 reset = scic_sds_controller_state_handler_table[state].base.reset;
2130 if (reset)
2131 status = reset(&scic->parent);
2132 else
2133 dev_warn(scic_to_dev(scic),
2134 "%s: SCIC Controller reset operation requested in "
2135 "invalid state %d\n", __func__, state);
2137 return status;
2141 * scic_controller_start_io() - This method is called by the SCI user to
2142 * send/start an IO request. If the method invocation is successful, then
2143 * the IO request has been queued to the hardware for processing.
2144 * @controller: the handle to the controller object for which to start an IO
2145 * request.
2146 * @remote_device: the handle to the remote device object for which to start an
2147 * IO request.
2148 * @io_request: the handle to the io request object to start.
2149 * @io_tag: This parameter specifies a previously allocated IO tag that the
2150 * user desires to be utilized for this request. This parameter is optional.
2151 * The user is allowed to supply SCI_CONTROLLER_INVALID_IO_TAG as the value
2152 * for this parameter.
2154 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2155 * to ensure that each of the methods that may allocate or free available IO
2156 * tags are handled in a mutually exclusive manner. This method is one of said
2157 * methods requiring proper critical code section protection (e.g. semaphore,
2158 * spin-lock, etc.). - For SATA, the user is required to manage NCQ tags. As a
2159 * result, it is expected the user will have set the NCQ tag field in the host
2160 * to device register FIS prior to calling this method. There is also a
2161 * requirement for the user to call scic_stp_io_set_ncq_tag() prior to invoking
2162 * the scic_controller_start_io() method. See scic_controller_allocate_io_tag() for
2163 * more information on allocating a tag. Indicate if the controller
2164 * successfully started the IO request. SCI_IO_SUCCESS if the IO request was
2165 * successfully started. Determine the failure situations and return values.
2167 enum sci_io_status scic_controller_start_io(
2168 struct scic_sds_controller *scic,
2169 struct scic_sds_remote_device *remote_device,
2170 struct scic_sds_request *io_request,
2171 u16 io_tag)
2173 u32 state;
2174 sci_base_controller_start_request_handler_t start_io;
2176 state = scic->parent.state_machine.current_state_id;
2177 start_io = scic_sds_controller_state_handler_table[state].base.start_io;
2179 return start_io(&scic->parent,
2180 (struct sci_base_remote_device *) remote_device,
2181 (struct sci_base_request *)io_request, io_tag);
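/*
 * Illustrative sketch (hypothetical caller): either supply a tag previously
 * obtained from scic_controller_allocate_io_tag(), or pass
 * SCI_CONTROLLER_INVALID_IO_TAG and let the core allocate one internally.
 * Caller-provided locking (see the note above) is assumed.
 *
 *	enum sci_io_status io_status;
 *
 *	io_status = scic_controller_start_io(scic, sci_dev, sci_req,
 *					     SCI_CONTROLLER_INVALID_IO_TAG);
 */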
2185 * scic_controller_terminate_request() - This method is called by the SCI Core
2186 * user to terminate an ongoing (i.e. started) core IO request. This does
2187 * not abort the IO request at the target, but rather removes the IO request
2188 * from the host controller.
2189 * @controller: the handle to the controller object for which to terminate a
2190 * request.
2191 * @remote_device: the handle to the remote device object for which to
2192 * terminate a request.
2193 * @request: the handle to the io or task management request object to
2194 * terminate.
2196 * Indicate if the controller successfully began the terminate process for the
2197 * IO request. SCI_SUCCESS if the terminate process was successfully started
2198 * for the request. Determine the failure situations and return values.
2200 enum sci_status scic_controller_terminate_request(
2201 struct scic_sds_controller *scic,
2202 struct scic_sds_remote_device *remote_device,
2203 struct scic_sds_request *request)
2205 sci_base_controller_request_handler_t terminate_request;
2206 u32 state;
2208 state = scic->parent.state_machine.current_state_id;
2209 terminate_request = scic_sds_controller_state_handler_table[state].terminate_request;
2211 return terminate_request(&scic->parent,
2212 (struct sci_base_remote_device *)remote_device,
2213 (struct sci_base_request *)request);
2217 * scic_controller_complete_io() - This method will perform core specific
2218 * completion operations for an IO request. After this method is invoked,
2219 * the user should consider the IO request as invalid until it is properly
2220 * reused (i.e. re-constructed).
2221 * @controller: The handle to the controller object for which to complete the
2222 * IO request.
2223 * @remote_device: The handle to the remote device object for which to complete
2224 * the IO request.
2225 * @io_request: the handle to the io request object to complete.
2227 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2228 * to ensure that each of the methods that may allocate or free available IO
2229 * tags are handled in a mutually exclusive manner. This method is one of said
2230 * methods requiring proper critical code section protection (e.g. semaphore,
2231 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
2232 * Core user, using the scic_controller_allocate_io_tag() method, then it is
2233 * the responsibility of the caller to invoke the scic_controller_free_io_tag()
2234 * method to free the tag (i.e. this method will not free the IO tag). Indicate
2235 * if the controller successfully completed the IO request. SCI_SUCCESS if the
2236 * completion process was successful.
2238 enum sci_status scic_controller_complete_io(
2239 struct scic_sds_controller *scic,
2240 struct scic_sds_remote_device *remote_device,
2241 struct scic_sds_request *io_request)
2243 u32 state;
2244 sci_base_controller_request_handler_t complete_io;
2246 state = scic->parent.state_machine.current_state_id;
2247 complete_io = scic_sds_controller_state_handler_table[state].base.complete_io;
2249 return complete_io(&scic->parent,
2250 (struct sci_base_remote_device *)remote_device,
2251 (struct sci_base_request *)io_request);
2255 * scic_controller_start_task() - This method is called by the SCIC user to
2256 * send/start a framework task management request.
2257 * @controller: the handle to the controller object for which to start the task
2258 * management request.
2259 * @remote_device: the handle to the remote device object for which to start
2260 * the task management request.
2261 * @task_request: the handle to the task request object to start.
2262 * @io_tag: This parameter specifies a previously allocated IO tag that the
2263 * user desires to be utilized for this request. Note this is not the io_tag
2264 * of the request being managed. It is to be utilized for the task request
2265 * itself. This parameter is optional. The user is allowed to supply
2266 * SCI_CONTROLLER_INVALID_IO_TAG as the value for this parameter.
2268 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2269 * to ensure that each of the methods that may allocate or free available IO
2270 * tags are handled in a mutually exclusive manner. This method is one of said
2271 * methods requiring proper critical code section protection (e.g. semaphore,
2272 * spin-lock, etc.). - The user must synchronize this task with completion
2273 * queue processing. If they are not synchronized then it is possible for the
2274 * io requests that are being managed by the task request to complete before
2275 * the task request is started. See scic_controller_allocate_io_tag() for more
2276 * information on allocating a tag. Indicate if the controller successfully
2277 * started the IO request. SCI_TASK_SUCCESS if the task request was
2278 * successfully started. SCI_TASK_FAILURE_REQUIRES_SCSI_ABORT This value is
2279 * returned if there is/are task(s) outstanding that require termination or
2280 * completion before this request can succeed.
2282 enum sci_task_status scic_controller_start_task(
2283 struct scic_sds_controller *scic,
2284 struct scic_sds_remote_device *remote_device,
2285 struct scic_sds_request *task_request,
2286 u16 task_tag)
2288 u32 state;
2289 sci_base_controller_start_request_handler_t start_task;
2290 enum sci_task_status status = SCI_TASK_FAILURE_INVALID_STATE;
2292 state = scic->parent.state_machine.current_state_id;
2293 start_task = scic_sds_controller_state_handler_table[state].base.start_task;
2295 if (start_task)
2296 status = start_task(&scic->parent,
2297 (struct sci_base_remote_device *)remote_device,
2298 (struct sci_base_request *)task_request,
2299 task_tag);
2300 else
2301 dev_warn(scic_to_dev(scic),
2302 "%s: SCIC Controller starting task from invalid "
2303 "state\n",
2304 __func__);
2306 return status;
2310 * scic_controller_complete_task() - This method will perform core specific
2311 * completion operations for task management request. After this method is
2312 * invoked, the user should consider the task request as invalid until it is
2313 * properly reused (i.e. re-constructed).
2314 * @controller: The handle to the controller object for which to complete the
2315 * task management request.
2316 * @remote_device: The handle to the remote device object for which to complete
2317 * the task management request.
2318 * @task_request: the handle to the task management request object to complete.
2320 * Indicate if the controller successfully completed the task management
2321 * request. SCI_SUCCESS if the completion process was successful.
2323 enum sci_status scic_controller_complete_task(
2324 struct scic_sds_controller *scic,
2325 struct scic_sds_remote_device *remote_device,
2326 struct scic_sds_request *task_request)
2328 u32 state;
2329 sci_base_controller_request_handler_t complete_task;
2330 enum sci_status status = SCI_FAILURE_INVALID_STATE;
2332 state = scic->parent.state_machine.current_state_id;
2333 complete_task = scic_sds_controller_state_handler_table[state].base.complete_task;
2335 if (complete_task)
2336 status = complete_task(&scic->parent,
2337 (struct sci_base_remote_device *)remote_device,
2338 (struct sci_base_request *)task_request);
2339 else
2340 dev_warn(scic_to_dev(scic),
2341 "%s: SCIC Controller completing task from invalid "
2342 "state\n",
2343 __func__);
2345 return status;
2350 * scic_controller_get_port_handle() - This method simply provides the user
2351 * with a unique handle for a given SAS/SATA core port index.
2352 * @controller: This parameter represents the handle to the controller object
2353 * from which to retrieve a port (SAS or SATA) handle.
2354 * @port_index: This parameter specifies the port index in the controller for
2355 * which to retrieve the port handle. 0 <= port_index < maximum number of
2356 * phys.
2357 * @port_handle: This parameter specifies the retrieved port handle to be
2358 * provided to the caller.
2360 * Indicate if the retrieval of the port handle was successful. SCI_SUCCESS
2361 * This value is returned if the retrieval was successful.
2362 * SCI_FAILURE_INVALID_PORT This value is returned if the supplied port id is
2363 * not in the supported range.
2365 enum sci_status scic_controller_get_port_handle(
2366 struct scic_sds_controller *scic,
2367 u8 port_index,
2368 struct scic_sds_port **port_handle)
2370 if (port_index < scic->logical_port_entries) {
2371 *port_handle = &scic->port_table[port_index];
2373 return SCI_SUCCESS;
2376 return SCI_FAILURE_INVALID_PORT;
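/*
 * Illustrative sketch (hypothetical caller): walking the logical ports via
 * the handle accessor; the loop stops where the range check above fails.
 *
 *	struct scic_sds_port *sci_port;
 *	u8 i;
 *
 *	for (i = 0; i < SCI_MAX_PORTS; i++) {
 *		if (scic_controller_get_port_handle(scic, i, &sci_port) != SCI_SUCCESS)
 *			break;
 *		...
 *	}
 */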
2380 * scic_controller_get_phy_handle() - This method simply provides the user with
2381 * a unique handle for a given SAS/SATA phy index/identifier.
2382 * @controller: This parameter represents the handle to the controller object
2383 * from which to retrieve a phy (SAS or SATA) handle.
2384 * @phy_index: This parameter specifies the phy index in the controller for
2385 * which to retrieve the phy handle. 0 <= phy_index < maximum number of phys.
2386 * @phy_handle: This parameter specifies the retrieved phy handle to be
2387 * provided to the caller.
2389 * Indicate if the retrieval of the phy handle was successful. SCI_SUCCESS This
2390 * value is returned if the retrieval was successful. SCI_FAILURE_INVALID_PHY
2391 * This value is returned if the supplied phy id is not in the supported range.
2393 enum sci_status scic_controller_get_phy_handle(
2394 struct scic_sds_controller *scic,
2395 u8 phy_index,
2396 struct scic_sds_phy **phy_handle)
2398 if (phy_index < ARRAY_SIZE(scic->phy_table)) {
2399 *phy_handle = &scic->phy_table[phy_index];
2401 return SCI_SUCCESS;
2404 dev_err(scic_to_dev(scic),
2405 "%s: Controller:0x%p PhyId:0x%x invalid phy index\n",
2406 __func__, scic, phy_index);
2408 return SCI_FAILURE_INVALID_PHY;
2412 * scic_controller_allocate_io_tag() - This method will allocate a tag from the
2413 * pool of free IO tags. Direct allocation of IO tags by the SCI Core user
2414 * is optional. The scic_controller_start_io() method will allocate an IO
2415 * tag if this method is not utilized and the tag is not supplied to the IO
2416 * construct routine. Direct allocation of IO tags may provide additional
2417 * performance improvements in environments capable of supporting this usage
2418 * model. Additionally, direct allocation of IO tags also provides
2419 * additional flexibility to the SCI Core user. Specifically, the user may
2420 * retain IO tags across the lives of multiple IO requests.
2421 * @controller: the handle to the controller object for which to allocate the
2422 * tag.
2424 * IO tags are a protected resource. It is incumbent upon the SCI Core user to
2425 * ensure that each of the methods that may allocate or free available IO tags
2426 * are handled in a mutually exclusive manner. This method is one of said
2427 * methods requiring proper critical code section protection (e.g. semaphore,
2428 * spin-lock, etc.). An unsigned integer representing an available IO tag.
2429 * SCI_CONTROLLER_INVALID_IO_TAG This value is returned if there are no
2430 * currently available tags to be allocated. All other return values indicate a
2431 * legitimate tag.
2433 u16 scic_controller_allocate_io_tag(
2434 struct scic_sds_controller *scic)
2436 u16 task_context;
2437 u16 sequence_count;
2439 if (!sci_pool_empty(scic->tci_pool)) {
2440 sci_pool_get(scic->tci_pool, task_context);
2442 sequence_count = scic->io_request_sequence[task_context];
2444 return scic_sds_io_tag_construct(sequence_count, task_context);
2447 return SCI_CONTROLLER_INVALID_IO_TAG;
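/*
 * Illustrative sketch (hypothetical caller): a directly allocated tag is the
 * caller's responsibility to return via scic_controller_free_io_tag(), and
 * the allocate/free pair must be serialized by the caller as noted above.
 *
 *	u16 io_tag = scic_controller_allocate_io_tag(scic);
 *
 *	if (io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
 *		...
 *		scic_controller_free_io_tag(scic, io_tag);
 *	}
 */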
2451 * scic_controller_free_io_tag() - This method will free an IO tag to the pool
2452 * of free IO tags. This method provides the SCI Core user more flexibility
2453 * with regards to IO tags. The user may desire to keep an IO tag after an
2454 * IO request has completed, because they plan on re-using the tag for a
2455 * subsequent IO request. This method is only legal if the tag was
2456 * allocated via scic_controller_allocate_io_tag().
2457 * @controller: This parameter specifies the handle to the controller object
2458 * for which to free/return the tag.
2459 * @io_tag: This parameter represents the tag to be freed to the pool of
2460 * available tags.
2462 * - IO tags are a protected resource. It is incumbent upon the SCI Core user
2463 * to ensure that each of the methods that may allocate or free available IO
2464 * tags are handled in a mutually exclusive manner. This method is one of said
2465 * methods requiring proper critical code section protection (e.g. semaphore,
2466 * spin-lock, etc.). - If the IO tag for a request was allocated, by the SCI
2467 * Core user, using the scic_controller_allocate_io_tag() method, then it is
2468 * the responsibility of the caller to invoke this method to free the tag. This
2469 * method returns an indication of whether the tag was successfully put back
2470 * (freed) to the pool of available tags. SCI_SUCCESS This return value
2471 * indicates the tag was successfully placed into the pool of available IO
2472 * tags. SCI_FAILURE_INVALID_IO_TAG This value is returned if the supplied tag
2473 * is not a valid IO tag value.
2475 enum sci_status scic_controller_free_io_tag(
2476 struct scic_sds_controller *scic,
2477 u16 io_tag)
2479 u16 sequence;
2480 u16 index;
2482 BUG_ON(io_tag == SCI_CONTROLLER_INVALID_IO_TAG);
2484 sequence = scic_sds_io_tag_get_sequence(io_tag);
2485 index = scic_sds_io_tag_get_index(io_tag);
2487 if (!sci_pool_full(scic->tci_pool)) {
2488 if (sequence == scic->io_request_sequence[index]) {
2489 scic_sds_io_sequence_increment(
2490 scic->io_request_sequence[index]);
2492 sci_pool_put(scic->tci_pool, index);
2494 return SCI_SUCCESS;
2498 return SCI_FAILURE_INVALID_IO_TAG;
2501 void scic_controller_enable_interrupts(
2502 struct scic_sds_controller *scic)
2504 BUG_ON(scic->smu_registers == NULL);
2505 writel(0, &scic->smu_registers->interrupt_mask);
2508 void scic_controller_disable_interrupts(
2509 struct scic_sds_controller *scic)
2511 BUG_ON(scic->smu_registers == NULL);
2512 writel(0xffffffff, &scic->smu_registers->interrupt_mask);
2515 static enum sci_status scic_controller_set_mode(
2516 struct scic_sds_controller *scic,
2517 enum sci_controller_mode operating_mode)
2519 enum sci_status status = SCI_SUCCESS;
2521 if ((scic->parent.state_machine.current_state_id ==
2522 SCI_BASE_CONTROLLER_STATE_INITIALIZING) ||
2523 (scic->parent.state_machine.current_state_id ==
2524 SCI_BASE_CONTROLLER_STATE_INITIALIZED)) {
2525 switch (operating_mode) {
2526 case SCI_MODE_SPEED:
2527 scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
2528 scic->task_context_entries = SCU_IO_REQUEST_COUNT;
2529 scic->uf_control.buffers.count =
2530 SCU_UNSOLICITED_FRAME_COUNT;
2531 scic->completion_event_entries = SCU_EVENT_COUNT;
2532 scic->completion_queue_entries =
2533 SCU_COMPLETION_QUEUE_COUNT;
2534 scic_sds_controller_build_memory_descriptor_table(scic);
2535 break;
2537 case SCI_MODE_SIZE:
2538 scic->remote_node_entries = SCI_MIN_REMOTE_DEVICES;
2539 scic->task_context_entries = SCI_MIN_IO_REQUESTS;
2540 scic->uf_control.buffers.count =
2541 SCU_MIN_UNSOLICITED_FRAMES;
2542 scic->completion_event_entries = SCU_MIN_EVENTS;
2543 scic->completion_queue_entries =
2544 SCU_MIN_COMPLETION_QUEUE_ENTRIES;
2545 scic_sds_controller_build_memory_descriptor_table(scic);
2546 break;
2548 default:
2549 status = SCI_FAILURE_INVALID_PARAMETER_VALUE;
2550 break;
2552 } else
2553 status = SCI_FAILURE_INVALID_STATE;
2555 return status;
2559 * scic_sds_controller_reset_hardware() -
2561 * This method will reset the controller hardware.
2563 static void scic_sds_controller_reset_hardware(
2564 struct scic_sds_controller *scic)
2566 /* Disable interrupts so we don't take any spurious interrupts */
2567 scic_controller_disable_interrupts(scic);
2569 /* Reset the SCU */
2570 writel(0xFFFFFFFF, &scic->smu_registers->soft_reset_control);
2572 /* Delay for 1ms before clearing the CQP and UFQPR. */
2573 udelay(1000);
2575 /* The write to the CQGR clears the CQP */
2576 writel(0x00000000, &scic->smu_registers->completion_queue_get);
2578 /* The write to the UFQGP clears the UFQPR */
2579 writel(0, &scic->scu_registers->sdma.unsolicited_frame_get_pointer);
2582 enum sci_status scic_user_parameters_set(
2583 struct scic_sds_controller *scic,
2584 union scic_user_parameters *scic_parms)
2586 u32 state = scic->parent.state_machine.current_state_id;
2588 if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2589 state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2590 state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2591 u16 index;
2594 * Validate the user parameters. If they are not legal, then
2595 * return a failure.
2597 for (index = 0; index < SCI_MAX_PHYS; index++) {
2598 struct sci_phy_user_params *user_phy;
2600 user_phy = &scic_parms->sds1.phys[index];
2602 if (!((user_phy->max_speed_generation <=
2603 SCIC_SDS_PARM_MAX_SPEED) &&
2604 (user_phy->max_speed_generation >
2605 SCIC_SDS_PARM_NO_SPEED)))
2606 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2608 if (user_phy->in_connection_align_insertion_frequency <
2609 3)
2610 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2612 if ((user_phy->in_connection_align_insertion_frequency <
2613 3) ||
2614 (user_phy->align_insertion_frequency == 0) ||
2615 (user_phy->
2616 notify_enable_spin_up_insertion_frequency ==
2617 0))
2618 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2621 if ((scic_parms->sds1.stp_inactivity_timeout == 0) ||
2622 (scic_parms->sds1.ssp_inactivity_timeout == 0) ||
2623 (scic_parms->sds1.stp_max_occupancy_timeout == 0) ||
2624 (scic_parms->sds1.ssp_max_occupancy_timeout == 0) ||
2625 (scic_parms->sds1.no_outbound_task_timeout == 0))
2626 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2628 memcpy(&scic->user_parameters, scic_parms, sizeof(*scic_parms));
2630 return SCI_SUCCESS;
2633 return SCI_FAILURE_INVALID_STATE;
2636 enum sci_status scic_oem_parameters_set(
2637 struct scic_sds_controller *scic,
2638 union scic_oem_parameters *scic_parms)
2640 u32 state = scic->parent.state_machine.current_state_id;
2642 if (state == SCI_BASE_CONTROLLER_STATE_RESET ||
2643 state == SCI_BASE_CONTROLLER_STATE_INITIALIZING ||
2644 state == SCI_BASE_CONTROLLER_STATE_INITIALIZED) {
2645 u16 index;
2646 u8 combined_phy_mask = 0;
2649 * Validate the oem parameters. If they are not legal, then
2650 * return a failure. */
2651 for (index = 0; index < SCI_MAX_PORTS; index++) {
2652 if (scic_parms->sds1.ports[index].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
2653 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2656 for (index = 0; index < SCI_MAX_PHYS; index++) {
2657 if ((scic_parms->sds1.phys[index].sas_address.high == 0) &&
2658 (scic_parms->sds1.phys[index].sas_address.low == 0))
2659 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2662 if (scic_parms->sds1.controller.mode_type ==
2663 SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
2664 for (index = 0; index < SCI_MAX_PHYS; index++) {
2665 if (scic_parms->sds1.ports[index].phy_mask != 0)
2666 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2668 } else if (scic_parms->sds1.controller.mode_type ==
2669 SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
2670 for (index = 0; index < SCI_MAX_PHYS; index++)
2671 combined_phy_mask |= scic_parms->sds1.ports[index].phy_mask;
2673 if (combined_phy_mask == 0)
2674 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2675 } else
2676 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2678 if (scic_parms->sds1.controller.max_concurrent_dev_spin_up >
2679 MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT)
2680 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2682 scic->oem_parameters.sds1 = scic_parms->sds1;
2684 return SCI_SUCCESS;
2687 return SCI_FAILURE_INVALID_STATE;
2690 void scic_oem_parameters_get(
2691 struct scic_sds_controller *scic,
2692 union scic_oem_parameters *scic_parms)
2694 memcpy(scic_parms, (&scic->oem_parameters), sizeof(*scic_parms));
2697 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
2698 #define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
2699 #define INTERRUPT_COALESCE_TIMEOUT_MAX_US 2700000
2700 #define INTERRUPT_COALESCE_NUMBER_MAX 256
2701 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN 7
2702 #define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX 28
2705 * scic_controller_set_interrupt_coalescence() - This method allows the user to
2706 * configure the interrupt coalescence.
2707 * @controller: This parameter represents the handle to the controller object
2708 * for which its interrupt coalesce register is overridden.
2709 * @coalesce_number: Used to control the number of entries in the Completion
2710 * Queue before an interrupt is generated. If the number of entries exceeds
2711 * this number, an interrupt will be generated. The valid range of the input
2712 * is [0, 256]. A setting of 0 results in coalescing being disabled.
2713 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
2714 * input is [0, 2700000]. A setting of 0 is allowed and results in no
2715 * interrupt coalescing timeout.
2717 * Indicate if the user successfully set the interrupt coalesce parameters.
2718 * SCI_SUCCESS The user successfully updated the interrupt coalescence.
2719 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
2721 static enum sci_status scic_controller_set_interrupt_coalescence(
2722 struct scic_sds_controller *scic_controller,
2723 u32 coalesce_number,
2724 u32 coalesce_timeout)
2726 u8 timeout_encode = 0;
2727 u32 min = 0;
2728 u32 max = 0;
2730 /* Check if the input parameters fall in the range. */
2731 if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
2732 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2735 * Defined encoding for interrupt coalescing timeout:
2736 * Value Min Max Units
2737 * ----- --- --- -----
2738 * 0 - - Disabled
2739 * 1 13.3 20.0 ns
2740 * 2 26.7 40.0
2741 * 3 53.3 80.0
2742 * 4 106.7 160.0
2743 * 5 213.3 320.0
2744 * 6 426.7 640.0
2745 * 7 853.3 1280.0
2746 * 8 1.7 2.6 us
2747 * 9 3.4 5.1
2748 * 10 6.8 10.2
2749 * 11 13.7 20.5
2750 * 12 27.3 41.0
2751 * 13 54.6 81.9
2752 * 14 109.2 163.8
2753 * 15 218.5 327.7
2754 * 16 436.9 655.4
2755 * 17 873.8 1310.7
2756 * 18 1.7 2.6 ms
2757 * 19 3.5 5.2
2758 * 20 7.0 10.5
2759 * 21 14.0 21.0
2760 * 22 28.0 41.9
2761 * 23 55.9 83.9
2762 * 24 111.8 167.8
2763 * 25 223.7 335.5
2764 * 26 447.4 671.1
2765 * 27 894.8 1342.2
2766 * 28 1.8 2.7 s
2767 * Others Undefined */
2770 * Use the table above to decide the encode of interrupt coalescing timeout
2771 * value for register writing. */
2772 if (coalesce_timeout == 0)
2773 timeout_encode = 0;
2774 else {
2775 /* make the timeout value in units of (10 ns). */
2776 coalesce_timeout = coalesce_timeout * 100;
2777 min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
2778 max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;
2780 /* get the encode of timeout for register writing. */
2781 for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
2782 timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
2783 timeout_encode++) {
2784 if (min <= coalesce_timeout && max > coalesce_timeout)
2785 break;
2786 else if (coalesce_timeout >= max && coalesce_timeout < min * 2
2787 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
2788 if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
2789 break;
2790 else {
2791 timeout_encode++;
2792 break;
2794 } else {
2795 max = max * 2;
2796 min = min * 2;
2800 if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
2801 /* the value is out of range. */
2802 return SCI_FAILURE_INVALID_PARAMETER_VALUE;
2805 writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
2806 SMU_ICC_GEN_VAL(TIMER, timeout_encode),
2807 &scic_controller->smu_registers->interrupt_coalesce_control);
2810 scic_controller->interrupt_coalesce_number = (u16)coalesce_number;
2811 scic_controller->interrupt_coalesce_timeout = coalesce_timeout / 100;
2813 return SCI_SUCCESS;
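/*
 * Worked example (illustrative only): a coalesce_timeout of 5 us becomes 500
 * in 10 ns units; the search above doubles the 85/128 base range until 500
 * falls inside [340, 512), i.e. timeout_encode = 9, which matches the
 * 3.4 - 5.1 us row of the table.  A call such as
 *
 *	scic_controller_set_interrupt_coalescence(scic, 32, 5);
 *
 * therefore coalesces up to 32 completions or roughly 5 us, whichever comes
 * first.
 */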
2817 struct scic_sds_controller *scic_controller_alloc(struct device *dev)
2819 return devm_kzalloc(dev, sizeof(struct scic_sds_controller), GFP_KERNEL);
2822 static enum sci_status default_controller_handler(struct sci_base_controller *base_scic,
2823 const char *func)
2825 struct scic_sds_controller *scic = container_of(base_scic, typeof(*scic), parent);
2826 u32 state = base_scic->state_machine.current_state_id;
2828 dev_warn(scic_to_dev(scic), "%s: invalid state %d\n", func, state);
2830 return SCI_FAILURE_INVALID_STATE;
2833 static enum sci_status scic_sds_controller_default_start_operation_handler(
2834 struct sci_base_controller *base_scic,
2835 struct sci_base_remote_device *remote_device,
2836 struct sci_base_request *io_request,
2837 u16 io_tag)
2839 return default_controller_handler(base_scic, __func__);
2842 static enum sci_status scic_sds_controller_default_request_handler(
2843 struct sci_base_controller *base_scic,
2844 struct sci_base_remote_device *remote_device,
2845 struct sci_base_request *io_request)
2847 return default_controller_handler(base_scic, __func__);
2850 static enum sci_status scic_sds_controller_general_reset_handler(struct sci_base_controller *base_scic)
2852 /* The reset operation is not a graceful cleanup; just perform the state
2853 * transition.
2855 sci_base_state_machine_change_state(&base_scic->state_machine,
2856 SCI_BASE_CONTROLLER_STATE_RESETTING);
2858 return SCI_SUCCESS;
2861 static enum sci_status scic_sds_controller_reset_state_initialize_handler(struct sci_base_controller *base_scic)
2863 struct sci_base_state_machine *sm = &base_scic->state_machine;
2864 enum sci_status result = SCI_SUCCESS;
2865 struct scic_sds_controller *scic;
2866 struct isci_host *ihost;
2867 u32 index, state;
2869 scic = container_of(base_scic, typeof(*scic), parent);
2870 ihost = sci_object_get_association(scic);
2872 sci_base_state_machine_change_state(sm, SCI_BASE_CONTROLLER_STATE_INITIALIZING);
2874 scic->timeout_timer = isci_timer_create(ihost,
2875 scic,
2876 scic_sds_controller_timeout_handler);
2878 scic_sds_controller_initialize_phy_startup(scic);
2880 scic_sds_controller_initialize_power_control(scic);
2883 * There is nothing to do here for B0 since we do not have to
2884 * program the AFE registers.
2885 * @todo The AFE settings are supposed to be correct for the B0 but
2886 * presently they seem to be wrong. */
2887 scic_sds_controller_afe_initialization(scic);
2889 if (result == SCI_SUCCESS) {
2890 u32 status;
2891 u32 terminate_loop;
2893 /* Take the hardware out of reset */
2894 writel(0, &scic->smu_registers->soft_reset_control);
2897 * @todo Provide a meaningful error code for hardware failure
2898 * result = SCI_FAILURE_CONTROLLER_HARDWARE; */
2899 result = SCI_FAILURE;
2900 terminate_loop = 100;
2902 while (terminate_loop-- && (result != SCI_SUCCESS)) {
2903 /* Loop until the hardware reports success */
2904 udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME);
2905 status = readl(&scic->smu_registers->control_status);
2907 if ((status & SCU_RAM_INIT_COMPLETED) ==
2908 SCU_RAM_INIT_COMPLETED)
2909 result = SCI_SUCCESS;
2913 if (result == SCI_SUCCESS) {
2914 u32 max_supported_ports;
2915 u32 max_supported_devices;
2916 u32 max_supported_io_requests;
2917 u32 device_context_capacity;
2920 * Determine the actual device capacities that the
2921 * hardware will support */
2922 device_context_capacity =
2923 readl(&scic->smu_registers->device_context_capacity);
2926 max_supported_ports = smu_dcc_get_max_ports(device_context_capacity);
2927 max_supported_devices = smu_dcc_get_max_remote_node_context(device_context_capacity);
2928 max_supported_io_requests = smu_dcc_get_max_task_context(device_context_capacity);
2931 * Make all PEs that are unassigned match up with the
2932 * logical ports
2934 for (index = 0; index < max_supported_ports; index++) {
2935 struct scu_port_task_scheduler_group_registers *ptsg =
2936 &scic->scu_registers->peg0.ptsg;
2938 writel(index, &ptsg->protocol_engine[index]);
2941 /* Record the smaller of the two capacity values */
2942 scic->logical_port_entries =
2943 min(max_supported_ports, scic->logical_port_entries);
2945 scic->task_context_entries =
2946 min(max_supported_io_requests,
2947 scic->task_context_entries);
2949 scic->remote_node_entries =
2950 min(max_supported_devices, scic->remote_node_entries);
2953 * Now that we have the correct hardware reported minimum values
2954 * build the MDL for the controller. Default to a performance
2955 * configuration.
2957 scic_controller_set_mode(scic, SCI_MODE_SPEED);
2960 /* Initialize hardware PCI Relaxed ordering in DMA engines */
2961 if (result == SCI_SUCCESS) {
2962 u32 dma_configuration;
2964 /* Configure the payload DMA */
2965 dma_configuration =
2966 readl(&scic->scu_registers->sdma.pdma_configuration);
2967 dma_configuration |=
2968 SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2969 writel(dma_configuration,
2970 &scic->scu_registers->sdma.pdma_configuration);
2972 /* Configure the control DMA */
2973 dma_configuration =
2974 readl(&scic->scu_registers->sdma.cdma_configuration);
2975 dma_configuration |=
2976 SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE);
2977 writel(dma_configuration,
2978 &scic->scu_registers->sdma.cdma_configuration);
2982 * Initialize the PHYs before the PORTs because the PHY registers
2983 * are accessed during the port initialization.
2985 if (result == SCI_SUCCESS) {
2986 /* Initialize the phys */
2987 for (index = 0;
2988 (result == SCI_SUCCESS) && (index < SCI_MAX_PHYS);
2989 index++) {
2990 result = scic_sds_phy_initialize(
2991 &scic->phy_table[index],
2992 &scic->scu_registers->peg0.pe[index].tl,
2993 &scic->scu_registers->peg0.pe[index].ll);
2997 if (result == SCI_SUCCESS) {
2998 /* Initialize the logical ports */
2999 for (index = 0;
3000 (index < scic->logical_port_entries) &&
3001 (result == SCI_SUCCESS);
3002 index++) {
3003 result = scic_sds_port_initialize(
3004 &scic->port_table[index],
3005 &scic->scu_registers->peg0.ptsg.port[index],
3006 &scic->scu_registers->peg0.ptsg.protocol_engine,
3007 &scic->scu_registers->peg0.viit[index]);
3011 if (result == SCI_SUCCESS)
3012 result = scic_sds_port_configuration_agent_initialize(
3013 scic,
3014 &scic->port_agent);
3016 /* Advance the controller state machine */
3017 if (result == SCI_SUCCESS)
3018 state = SCI_BASE_CONTROLLER_STATE_INITIALIZED;
3019 else
3020 state = SCI_BASE_CONTROLLER_STATE_FAILED;
3021 sci_base_state_machine_change_state(sm, state);
3023 return result;
3027 * *****************************************************************************
3028 * * INITIALIZED STATE HANDLERS
3029 * ***************************************************************************** */
3033 * @controller: This is the struct sci_base_controller object which is cast
3034 * into a struct scic_sds_controller object.
3035 * @timeout: This is the allowed time for the controller object to reach the
3036 * started state.
3038 * This function is the struct scic_sds_controller start handler for the
3039 * initialized state.
3040 * - Validate we have a good memory descriptor table - Initialize the
3041 * physical memory before programming the hardware - Program the SCU hardware
3042 * with the physical memory addresses passed in the memory descriptor table. -
3043 * Initialize the TCi pool - Initialize the RNi pool - Initialize the
3044 * completion queue - Initialize the unsolicited frame data - Take the SCU port
3045 * task scheduler out of reset - Start the first phy object. - Transition to
3046 * SCI_BASE_CONTROLLER_STATE_STARTING. enum sci_status SCI_SUCCESS if all of the
3047 * controller start operations complete
3048 * SCI_FAILURE_UNSUPPORTED_INFORMATION_FIELD if one or more of the memory
3049 * descriptor fields is invalid.
3051 static enum sci_status scic_sds_controller_initialized_state_start_handler(
3052 struct sci_base_controller *base_scic,
3053 u32 timeout)
3055 u16 index;
3056 enum sci_status result;
3057 struct scic_sds_controller *scic;
3059 scic = container_of(base_scic, typeof(*scic), parent);
3062 * Make sure that the SCI User filled in the memory descriptor
3063 * table correctly
3065 result = scic_sds_controller_validate_memory_descriptor_table(scic);
3067 if (result == SCI_SUCCESS) {
3069 * The memory descriptor list looks good so program the
3070 * hardware
3072 scic_sds_controller_ram_initialization(scic);
3075 if (result == SCI_SUCCESS) {
3076 /* Build the TCi free pool */
3077 sci_pool_initialize(scic->tci_pool);
3078 for (index = 0; index < scic->task_context_entries; index++)
3079 sci_pool_put(scic->tci_pool, index);
3081 /* Build the RNi free pool */
3082 scic_sds_remote_node_table_initialize(
3083 &scic->available_remote_nodes,
3084 scic->remote_node_entries);
3087 if (result == SCI_SUCCESS) {
3089 * Before anything else let's make sure we will not be
3090 * interrupted by the hardware.
3092 scic_controller_disable_interrupts(scic);
3094 /* Enable the port task scheduler */
3095 scic_sds_controller_enable_port_task_scheduler(scic);
3097 /* Assign all the task entries to scic physical function */
3098 scic_sds_controller_assign_task_entries(scic);
3100 /* Now initialize the completion queue */
3101 scic_sds_controller_initialize_completion_queue(scic);
3103 /* Initialize the unsolicited frame queue for use */
3104 scic_sds_controller_initialize_unsolicited_frame_queue(scic);
3107 /* Start all of the ports on this controller */
3108 for (index = 0;
3109 (index < scic->logical_port_entries) && (result == SCI_SUCCESS);
3110 index++) {
3111 struct scic_sds_port *sci_port = &scic->port_table[index];
3113 result = sci_port->state_handlers->parent.start_handler(
3114 &sci_port->parent);
3117 if (result == SCI_SUCCESS) {
3118 scic_sds_controller_start_next_phy(scic);
3120 isci_timer_start(scic->timeout_timer, timeout);
3122 sci_base_state_machine_change_state(&base_scic->state_machine,
3123 SCI_BASE_CONTROLLER_STATE_STARTING);
3126 return result;
3130 * *****************************************************************************
3131 * * STARTING STATE HANDLERS
3132 * ***************************************************************************** */
3136 * @controller: This is struct scic_sds_controller which receives the link up
3137 * notification.
3138 * @port: This is struct scic_sds_port with which the phy is associated.
3139 * @phy: This is the struct scic_sds_phy which has gone link up.
3141 * This method is called when the struct scic_sds_controller is in the starting
3142 * state and the link up handler is called. This method will perform the following: - Stop
3143 * the phy timer - Start the next phy - Report the link up condition to the
3144 * port object.
3146 static void scic_sds_controller_starting_state_link_up_handler(
3147 struct scic_sds_controller *this_controller,
3148 struct scic_sds_port *port,
3149 struct scic_sds_phy *phy)
3151 scic_sds_controller_phy_timer_stop(this_controller);
3153 this_controller->port_agent.link_up_handler(
3154 this_controller, &this_controller->port_agent, port, phy
3156 /* scic_sds_port_link_up(port, phy); */
3158 scic_sds_controller_start_next_phy(this_controller);
3163 * @controller: This is struct scic_sds_controller which receives the link down
3164 * notification.
3165 * @port: This is struct scic_sds_port with which the phy is associated.
3166 * @phy: This is the struct scic_sds_phy which has gone link down.
3168 * This method is called when the struct scic_sds_controller is in the starting
3169 * state and the link down handler is called. - Report the link down condition to the port
3170 * object.
3172 static void scic_sds_controller_starting_state_link_down_handler(
3173 struct scic_sds_controller *this_controller,
3174 struct scic_sds_port *port,
3175 struct scic_sds_phy *phy)
3177 this_controller->port_agent.link_down_handler(
3178 this_controller, &this_controller->port_agent, port, phy
3180 /* scic_sds_port_link_down(port, phy); */
3183 static enum sci_status scic_sds_controller_ready_state_stop_handler(struct sci_base_controller *base_scic,
3184 u32 timeout)
3186 struct scic_sds_controller *scic;
3188 scic = container_of(base_scic, typeof(*scic), parent);
3189 isci_timer_start(scic->timeout_timer, timeout);
3190 sci_base_state_machine_change_state(&base_scic->state_machine,
3191 SCI_BASE_CONTROLLER_STATE_STOPPING);
3193 return SCI_SUCCESS;
3198 * @controller: This is struct sci_base_controller object which is cast into a
3199 * struct scic_sds_controller object.
3200 * @remote_device: This is struct sci_base_remote_device which is cast to a
3201 * struct scic_sds_remote_device object.
3202 * @io_request: This is the struct sci_base_request which is cast to a
3203 * SCIC_SDS_IO_REQUEST object.
3204 * @io_tag: This is the IO tag to be assigned to the IO request or
3205 * SCI_CONTROLLER_INVALID_IO_TAG.
3207 * This method is called when the struct scic_sds_controller is in the ready state and
3208 * the start io handler is called. - Start the io request on the remote device
3209 * - if successful - assign the io_request to the io_request_table - post the
3210 * request to the hardware enum sci_status SCI_SUCCESS if the start io operation
3211 * succeeds SCI_FAILURE_INSUFFICIENT_RESOURCES if the IO tag could not be
3212 * allocated for the io request. SCI_FAILURE_INVALID_STATE if one or more
3213 * objects are not in a valid state to accept io requests. How does the io_tag
3214 * parameter get assigned to the io request?
3216 static enum sci_status scic_sds_controller_ready_state_start_io_handler(
3217 struct sci_base_controller *controller,
3218 struct sci_base_remote_device *remote_device,
3219 struct sci_base_request *io_request,
3220 u16 io_tag)
3222 enum sci_status status;
3224 struct scic_sds_controller *this_controller;
3225 struct scic_sds_request *the_request;
3226 struct scic_sds_remote_device *the_device;
3228 this_controller = (struct scic_sds_controller *)controller;
3229 the_request = (struct scic_sds_request *)io_request;
3230 the_device = (struct scic_sds_remote_device *)remote_device;
3232 status = scic_sds_remote_device_start_io(this_controller, the_device, the_request);
3234 if (status == SCI_SUCCESS) {
3235 this_controller->io_request_table[
3236 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3238 scic_sds_controller_post_request(
3239 this_controller,
3240 scic_sds_request_get_post_context(the_request)
3244 return status;
3249 * @controller: This is struct sci_base_controller object which is cast into a
3250 * struct scic_sds_controller object.
3251 * @remote_device: This is struct sci_base_remote_device which is cast to a
3252 * struct scic_sds_remote_device object.
3253 * @io_request: This is the struct sci_base_request which is cast to a
3254 * SCIC_SDS_IO_REQUEST object.
3256 * This method is called when the struct scic_sds_controller is in the ready state and
3257 * the complete io handler is called. - Complete the io request on the remote
3258 * device - if successful - remove the io_request from the io_request_table
3259 * enum sci_status SCI_SUCCESS if the start io operation succeeds
3260 * SCI_FAILURE_INVALID_STATE if one or more objects are not in a valid state to
3261 * accept io requests.
3263 static enum sci_status scic_sds_controller_ready_state_complete_io_handler(
3264 struct sci_base_controller *controller,
3265 struct sci_base_remote_device *remote_device,
3266 struct sci_base_request *io_request)
3268 u16 index;
3269 enum sci_status status;
3270 struct scic_sds_controller *this_controller;
3271 struct scic_sds_request *the_request;
3272 struct scic_sds_remote_device *the_device;
3274 this_controller = (struct scic_sds_controller *)controller;
3275 the_request = (struct scic_sds_request *)io_request;
3276 the_device = (struct scic_sds_remote_device *)remote_device;
3278 status = scic_sds_remote_device_complete_io(
3279 this_controller, the_device, the_request);
3281 if (status == SCI_SUCCESS) {
3282 index = scic_sds_io_tag_get_index(the_request->io_tag);
3283 this_controller->io_request_table[index] = NULL;
3286 return status;
3291 * @controller: This is struct sci_base_controller object which is cast into a
3292 * struct scic_sds_controller object.
3293 * @remote_device: This is struct sci_base_remote_device which is cast to a
3294 * struct scic_sds_remote_device object.
3295 * @io_request: This is the struct sci_base_request which is cast to a
3296 * SCIC_SDS_IO_REQUEST object.
3298 * This method is called when the struct scic_sds_controller is in the ready state and
3299 * the continue io handler is called. enum sci_status
3301 static enum sci_status scic_sds_controller_ready_state_continue_io_handler(
3302 struct sci_base_controller *controller,
3303 struct sci_base_remote_device *remote_device,
3304 struct sci_base_request *io_request)
3306 struct scic_sds_controller *this_controller;
3307 struct scic_sds_request *the_request;
3309 the_request = (struct scic_sds_request *)io_request;
3310 this_controller = (struct scic_sds_controller *)controller;
3312 this_controller->io_request_table[
3313 scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;
3315 scic_sds_controller_post_request(
3316 this_controller,
3317 scic_sds_request_get_post_context(the_request)
3320 return SCI_SUCCESS;
/**
 * scic_sds_controller_ready_state_start_task_handler() -
 * @controller: This is the struct sci_base_controller object which is cast into a
 *    struct scic_sds_controller object.
 * @remote_device: This is the struct sci_base_remote_device which is cast to a
 *    struct scic_sds_remote_device object.
 * @io_request: This is the struct sci_base_request which is cast to a
 *    struct scic_sds_request object.
 * @task_tag: This is the task tag to be assigned to the task request or
 *    SCI_CONTROLLER_INVALID_IO_TAG.
 *
 * This method is called when the struct scic_sds_controller is in the ready
 * state and the start task handler is invoked. The remote device is requested
 * to start the task request; if that succeeds the task is assigned to the
 * io_request_table and the request is posted to the SCU hardware. Returns enum
 * sci_status: SCI_SUCCESS if the start task operation succeeds,
 * SCI_FAILURE_INSUFFICIENT_RESOURCES if the IO tag could not be allocated for
 * the io request, SCI_FAILURE_INVALID_STATE if one or more objects are not in
 * a valid state to accept io requests.
 *
 * @todo How does the io tag get assigned in this code path?
 */
static enum sci_status scic_sds_controller_ready_state_start_task_handler(
	struct sci_base_controller *controller,
	struct sci_base_remote_device *remote_device,
	struct sci_base_request *io_request,
	u16 task_tag)
{
	struct scic_sds_controller *this_controller = (struct scic_sds_controller *)
		controller;
	struct scic_sds_request *the_request = (struct scic_sds_request *)
		io_request;
	struct scic_sds_remote_device *the_device = (struct scic_sds_remote_device *)
		remote_device;
	enum sci_status status;

	status = scic_sds_remote_device_start_task(
		this_controller, the_device, the_request
		);

	if (status == SCI_SUCCESS) {
		this_controller->io_request_table[
			scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;

		scic_sds_controller_post_request(
			this_controller,
			scic_sds_request_get_post_context(the_request)
			);
	} else if (status == SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS) {
		this_controller->io_request_table[
			scic_sds_io_tag_get_index(the_request->io_tag)] = the_request;

		/*
		 * Let the framework know this task request started successfully,
		 * although the core is still working on starting the request (the
		 * task context will be posted when the RNC is resumed). */
		status = SCI_SUCCESS;
	}

	return status;
}
/**
 * scic_sds_controller_ready_state_terminate_request_handler() -
 * @controller: This is the struct sci_base_controller object which is cast into a
 *    struct scic_sds_controller object.
 * @remote_device: This is the struct sci_base_remote_device which is cast to a
 *    struct scic_sds_remote_device object.
 * @io_request: This is the struct sci_base_request which is cast to a
 *    struct scic_sds_request object.
 *
 * This method is called when the struct scic_sds_controller is in the ready
 * state and the terminate request handler is invoked. It calls the io request
 * terminate function and, if that succeeds, posts the terminate request to the
 * SCU hardware. Returns enum sci_status: SCI_SUCCESS if the terminate request
 * operation succeeds, SCI_FAILURE_INVALID_STATE if one or more objects are not
 * in a valid state to accept io requests.
 */
static enum sci_status scic_sds_controller_ready_state_terminate_request_handler(
	struct sci_base_controller *controller,
	struct sci_base_remote_device *remote_device,
	struct sci_base_request *io_request)
{
	struct scic_sds_controller *this_controller = (struct scic_sds_controller *)
		controller;
	struct scic_sds_request *the_request = (struct scic_sds_request *)
		io_request;
	enum sci_status status;

	status = scic_sds_io_request_terminate(the_request);
	if (status == SCI_SUCCESS) {
		/*
		 * Utilize the original post context command and OR in the
		 * POST_TC_ABORT request sub-type. */
		scic_sds_controller_post_request(
			this_controller,
			scic_sds_request_get_post_context(the_request)
			| SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT
			);
	}

	return status;
}
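
/*
 * Illustrative sketch only (kept out of the build): the terminate path reuses
 * the request's normal post context and simply ORs in the abort sub-type, so
 * the same post-queue write both identifies the task context and tells the
 * SCU to abort it.  The helper name below is hypothetical; the real code
 * performs the OR inline in the handler above.
 */
#if 0
static u32 example_build_abort_post_context(struct scic_sds_request *the_request)
{
	u32 post_context = scic_sds_request_get_post_context(the_request);

	/* Same context bits, with the abort request sub-type OR'd in. */
	return post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT;
}
#endif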
/**
 * scic_sds_controller_ready_state_link_up_handler() -
 * @controller: This is the struct scic_sds_controller which receives the link up
 *    notification.
 * @port: This is the struct scic_sds_port with which the phy is associated.
 * @phy: This is the struct scic_sds_phy which has gone link up.
 *
 * This method is called when the struct scic_sds_controller is in the ready
 * state and the link up handler is invoked. It reports the link up condition
 * to the port configuration agent.
 */
static void scic_sds_controller_ready_state_link_up_handler(
	struct scic_sds_controller *this_controller,
	struct scic_sds_port *port,
	struct scic_sds_phy *phy)
{
	this_controller->port_agent.link_up_handler(
		this_controller, &this_controller->port_agent, port, phy
		);
}
/**
 * scic_sds_controller_ready_state_link_down_handler() -
 * @controller: This is the struct scic_sds_controller which receives the link down
 *    notification.
 * @port: This is the struct scic_sds_port with which the phy is associated.
 * @phy: This is the struct scic_sds_phy which has gone link down.
 *
 * This method is called when the struct scic_sds_controller is in the ready
 * state and the link down handler is invoked. It reports the link down
 * condition to the port configuration agent.
 */
static void scic_sds_controller_ready_state_link_down_handler(
	struct scic_sds_controller *this_controller,
	struct scic_sds_port *port,
	struct scic_sds_phy *phy)
{
	this_controller->port_agent.link_down_handler(
		this_controller, &this_controller->port_agent, port, phy
		);
}
/*
 * *****************************************************************************
 * * STOPPING STATE HANDLERS
 * ***************************************************************************** */

/**
 * scic_sds_controller_stopping_state_complete_io_handler() -
 * @controller: This is the struct sci_base_controller object which is cast into a
 *    struct scic_sds_controller object.
 * @remote_device: This is the struct sci_base_remote_device which is cast to a
 *    struct scic_sds_remote_device object.
 * @io_request: This is the struct sci_base_request which is cast to a
 *    struct scic_sds_request object.
 *
 * This method is called when the struct scic_sds_controller is in a stopping
 * state and the complete io handler is invoked. This function is not yet
 * implemented and currently returns SCI_FAILURE.
 */
static enum sci_status scic_sds_controller_stopping_state_complete_io_handler(
	struct sci_base_controller *controller,
	struct sci_base_remote_device *remote_device,
	struct sci_base_request *io_request)
{
	struct scic_sds_controller *this_controller;

	this_controller = (struct scic_sds_controller *)controller;

	/* @todo Implement this function */
	return SCI_FAILURE;
}
/**
 * scic_sds_controller_stopping_state_device_stopped_handler() -
 * @controller: This is the struct scic_sds_controller whose remote device has
 *    stopped.
 * @remote_device: This is the struct scic_sds_remote_device which has stopped.
 *
 * This method is called when the struct scic_sds_controller is in a stopping
 * state and a remote device has stopped. Once the last stopping remote device
 * has stopped, the controller transitions to the stopped state.
 */
static void scic_sds_controller_stopping_state_device_stopped_handler(
	struct scic_sds_controller *controller,
	struct scic_sds_remote_device *remote_device)
{
	if (!scic_sds_controller_has_remote_devices_stopping(controller)) {
		sci_base_state_machine_change_state(
			&controller->parent.state_machine,
			SCI_BASE_CONTROLLER_STATE_STOPPED
			);
	}
}
const struct scic_sds_controller_state_handler scic_sds_controller_state_handler_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_RESET] = {
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.initialize = scic_sds_controller_reset_state_initialize_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {
		.base.start = scic_sds_controller_initialized_state_start_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
		.link_up = scic_sds_controller_starting_state_link_up_handler,
		.link_down = scic_sds_controller_starting_state_link_down_handler
	},
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.base.stop = scic_sds_controller_ready_state_stop_handler,
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.start_io = scic_sds_controller_ready_state_start_io_handler,
		.base.complete_io = scic_sds_controller_ready_state_complete_io_handler,
		.base.continue_io = scic_sds_controller_ready_state_continue_io_handler,
		.base.start_task = scic_sds_controller_ready_state_start_task_handler,
		.base.complete_task = scic_sds_controller_ready_state_complete_io_handler,
		.terminate_request = scic_sds_controller_ready_state_terminate_request_handler,
		.link_up = scic_sds_controller_ready_state_link_up_handler,
		.link_down = scic_sds_controller_ready_state_link_down_handler
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_stopping_state_complete_io_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
		.device_stopped = scic_sds_controller_stopping_state_device_stopped_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {
		.base.reset = scic_sds_controller_general_reset_handler,
		.base.start_io = scic_sds_controller_default_start_operation_handler,
		.base.complete_io = scic_sds_controller_default_request_handler,
		.base.continue_io = scic_sds_controller_default_request_handler,
		.terminate_request = scic_sds_controller_default_request_handler,
	},
};
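
/*
 * Illustrative sketch only (kept out of the build): the per-state vtable above
 * is indexed by the controller's current base state, so an operation such as
 * complete_io is dispatched roughly as shown below.  The state accessor used
 * here is a simplified assumption; the real dispatch is performed by the
 * sci_base_controller framework.
 */
#if 0
static enum sci_status example_dispatch_complete_io(
	struct scic_sds_controller *scic,
	struct sci_base_remote_device *dev,
	struct sci_base_request *req)
{
	/* assumed accessor for the current base controller state */
	u32 state = scic->parent.state_machine.current_state_id;
	const struct scic_sds_controller_state_handler *handler =
		&scic_sds_controller_state_handler_table[state];

	/* READY resolves to the ready-state handler, everything else to a default. */
	return handler->base.complete_io(&scic->parent, dev, req);
}
#endif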
/**
 * scic_sds_controller_initial_state_enter() -
 * @object: This is the struct sci_base_object which is cast to a
 *    struct scic_sds_controller object.
 *
 * This method implements the actions taken by the struct scic_sds_controller
 * on entry to the SCI_BASE_CONTROLLER_STATE_INITIAL state. It immediately
 * transitions the state machine to the reset state.
 */
static void scic_sds_controller_initial_state_enter(
	struct sci_base_object *object)
{
	struct scic_sds_controller *this_controller;

	this_controller = (struct scic_sds_controller *)object;

	sci_base_state_machine_change_state(
		&this_controller->parent.state_machine, SCI_BASE_CONTROLLER_STATE_RESET);
}
/**
 * scic_sds_controller_starting_state_exit() -
 * @object: This is the struct sci_base_object which is cast to a
 *    struct scic_sds_controller object.
 *
 * This method implements the actions taken by the struct scic_sds_controller
 * on exit from the SCI_BASE_CONTROLLER_STATE_STARTING state. It stops the
 * controller start timeout timer.
 */
static inline void scic_sds_controller_starting_state_exit(
	struct sci_base_object *object)
{
	struct scic_sds_controller *scic = (struct scic_sds_controller *)object;

	isci_timer_stop(scic->timeout_timer);
}
/**
 * scic_sds_controller_ready_state_enter() -
 * @object: This is the struct sci_base_object which is cast to a
 *    struct scic_sds_controller object.
 *
 * This method implements the actions taken by the struct scic_sds_controller
 * on entry to the SCI_BASE_CONTROLLER_STATE_READY state. It programs the
 * default interrupt coalescence number and timeout.
 */
static void scic_sds_controller_ready_state_enter(
	struct sci_base_object *object)
{
	struct scic_sds_controller *this_controller;

	this_controller = (struct scic_sds_controller *)object;

	/* set the default interrupt coalescence number and timeout value. */
	scic_controller_set_interrupt_coalescence(
		this_controller, 0x10, 250);
}
/**
 * scic_sds_controller_ready_state_exit() -
 * @object: This is the struct sci_base_object which is cast to a
 *    struct scic_sds_controller object.
 *
 * This method implements the actions taken by the struct scic_sds_controller
 * on exit from the SCI_BASE_CONTROLLER_STATE_READY state. It disables
 * interrupt coalescence.
 */
static void scic_sds_controller_ready_state_exit(
	struct sci_base_object *object)
{
	struct scic_sds_controller *this_controller;

	this_controller = (struct scic_sds_controller *)object;

	/* disable interrupt coalescence. */
	scic_controller_set_interrupt_coalescence(this_controller, 0, 0);
}
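
/*
 * Illustrative sketch only (kept out of the build): entering READY arms
 * interrupt coalescence with a completion count of 0x10 and a timeout of 250
 * (assumed to be microseconds), while leaving READY disables coalescence by
 * passing zeros.  A hypothetical helper making that pairing explicit:
 */
#if 0
static void example_toggle_coalescence(struct scic_sds_controller *scic,
				       bool ready)
{
	if (ready)
		/* coalesce up to 16 completions or a 250 (assumed us) timeout */
		scic_controller_set_interrupt_coalescence(scic, 0x10, 250);
	else
		/* a zero count and timeout disable coalescence entirely */
		scic_controller_set_interrupt_coalescence(scic, 0, 0);
}
#endif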
/**
 * scic_sds_controller_stopping_state_enter() -
 * @object: This is the struct sci_base_object which is cast to a
 *    struct scic_sds_controller object.
 *
 * This method implements the actions taken by the struct scic_sds_controller
 * on entry to the SCI_BASE_CONTROLLER_STATE_STOPPING state: stop the phys on
 * this controller, stop the ports on this controller, and stop all of the
 * remote devices on this controller.
 */
static void scic_sds_controller_stopping_state_enter(
	struct sci_base_object *object)
{
	struct scic_sds_controller *this_controller;

	this_controller = (struct scic_sds_controller *)object;

	/* Stop all of the components for this controller */
	scic_sds_controller_stop_phys(this_controller);
	scic_sds_controller_stop_ports(this_controller);
	scic_sds_controller_stop_devices(this_controller);
}
/**
 * scic_sds_controller_stopping_state_exit() -
 * @object: This is the struct sci_base_object which is cast to a struct
 *    scic_sds_controller object.
 *
 * This function implements the actions taken by the struct scic_sds_controller
 * on exit from the SCI_BASE_CONTROLLER_STATE_STOPPING state. It stops the
 * controller stop timeout timer.
 */
static inline void scic_sds_controller_stopping_state_exit(
	struct sci_base_object *object)
{
	struct scic_sds_controller *scic =
		(struct scic_sds_controller *)object;

	isci_timer_stop(scic->timeout_timer);
}
static void scic_sds_controller_resetting_state_enter(struct sci_base_object *object)
{
	struct scic_sds_controller *scic;

	scic = container_of(object, typeof(*scic), parent.parent);
	scic_sds_controller_reset_hardware(scic);
	sci_base_state_machine_change_state(&scic->parent.state_machine,
					    SCI_BASE_CONTROLLER_STATE_RESET);
}
static const struct sci_base_state scic_sds_controller_state_table[] = {
	[SCI_BASE_CONTROLLER_STATE_INITIAL] = {
		.enter_state = scic_sds_controller_initial_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_RESET] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZING] = {},
	[SCI_BASE_CONTROLLER_STATE_INITIALIZED] = {},
	[SCI_BASE_CONTROLLER_STATE_STARTING] = {
		.exit_state = scic_sds_controller_starting_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_READY] = {
		.enter_state = scic_sds_controller_ready_state_enter,
		.exit_state = scic_sds_controller_ready_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_RESETTING] = {
		.enter_state = scic_sds_controller_resetting_state_enter,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPING] = {
		.enter_state = scic_sds_controller_stopping_state_enter,
		.exit_state = scic_sds_controller_stopping_state_exit,
	},
	[SCI_BASE_CONTROLLER_STATE_STOPPED] = {},
	[SCI_BASE_CONTROLLER_STATE_FAILED] = {}
};
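
/*
 * Illustrative sketch only (kept out of the build):
 * sci_base_state_machine_change_state() is expected to run the old state's
 * .exit_state hook and then the new state's .enter_state hook from the table
 * above, so e.g. leaving READY disables interrupt coalescence before STOPPING
 * begins quiescing phys, ports and devices.  The transition below is a
 * hypothetical example of that pairing.
 */
#if 0
static void example_ready_to_stopping(struct scic_sds_controller *scic)
{
	/* Runs scic_sds_controller_ready_state_exit(), then
	 * scic_sds_controller_stopping_state_enter(). */
	sci_base_state_machine_change_state(&scic->parent.state_machine,
					    SCI_BASE_CONTROLLER_STATE_STOPPING);
}
#endif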
/**
 * scic_controller_construct() - This method will attempt to construct a
 *    controller object utilizing the supplied parameter information.
 * @scic: This parameter specifies the controller to be constructed.
 * @scu_base: mapped base address of the scu registers
 * @smu_base: mapped base address of the smu registers
 *
 * Indicate if the controller was successfully constructed or if it failed in
 * some way. SCI_SUCCESS This value is returned if the controller was
 * successfully constructed. SCI_WARNING_TIMER_CONFLICT This value is returned
 * if the interrupt coalescence timer may cause SAS compliance issues for SMP
 * Target mode response processing. SCI_FAILURE_UNSUPPORTED_CONTROLLER_TYPE
 * This value is returned if the controller does not support the supplied type.
 * SCI_FAILURE_UNSUPPORTED_INIT_DATA_VERSION This value is returned if the
 * controller does not support the supplied initialization data version.
 */
enum sci_status scic_controller_construct(struct scic_sds_controller *scic,
					  void __iomem *scu_base,
					  void __iomem *smu_base)
{
	u8 i;

	sci_base_controller_construct(&scic->parent,
				      scic_sds_controller_state_table,
				      scic->memory_descriptors,
				      ARRAY_SIZE(scic->memory_descriptors), NULL);

	scic->scu_registers = scu_base;
	scic->smu_registers = smu_base;

	scic_sds_port_configuration_agent_construct(&scic->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		scic_sds_port_construct(&scic->port_table[i], i, scic);
	scic_sds_port_construct(&scic->port_table[i], SCIC_SDS_DUMMY_PORT, scic);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		scic_sds_phy_construct(&scic->phy_table[i],
				       &scic->port_table[SCI_MAX_PORTS], i);
	}

	scic->invalid_phy_mask = 0;

	/* Set the default maximum values */
	scic->completion_event_entries = SCU_EVENT_COUNT;
	scic->completion_queue_entries = SCU_COMPLETION_QUEUE_COUNT;
	scic->remote_node_entries = SCI_MAX_REMOTE_DEVICES;
	scic->logical_port_entries = SCI_MAX_PORTS;
	scic->task_context_entries = SCU_IO_REQUEST_COUNT;
	scic->uf_control.buffers.count = SCU_UNSOLICITED_FRAME_COUNT;
	scic->uf_control.address_table.count = SCU_UNSOLICITED_FRAME_COUNT;

	/* Initialize the User and OEM parameters to default values. */
	scic_sds_controller_set_default_config_parameters(scic);

	return scic_controller_reset(scic);