2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57 #include "intel_sas.h"
58 #include "intel_sata.h"
59 #include "intel_sat.h"
60 #include "scic_controller.h"
61 #include "scic_io_request.h"
62 #include "scic_sds_controller.h"
63 #include "scu_registers.h"
64 #include "scic_sds_port.h"
65 #include "remote_device.h"
66 #include "scic_sds_request.h"
67 #include "scic_sds_smp_request.h"
68 #include "scic_sds_stp_request.h"
69 #include "scic_sds_unsolicited_frame_control.h"
70 #include "sci_environment.h"
72 #include "scu_completion_codes.h"
73 #include "scu_constants.h"
74 #include "scu_task_context.h"
77 * ****************************************************************************
78 * * SCIC SDS IO REQUEST CONSTANTS
79 * **************************************************************************** */
84 * We have no timer requirements for IO requests right now
/* Timer bounds are both zero: IO requests currently set up no timers. */
86 #define SCIC_SDS_IO_REQUEST_MINIMUM_TIMER_COUNT (0)
87 #define SCIC_SDS_IO_REQUEST_MAXIMUM_TIMER_COUNT (0)
/*
 * NOTE(review): this macro region was corrupted by the source extraction —
 * the closing parentheses/lines of each #define were dropped. The surviving
 * tokens are kept byte-identical below. The visible pattern is a sequential
 * buffer layout carved out of a single allocation: request struct, then
 * command IU, then response IU, then task context, then SGL element pairs.
 */
90 * ****************************************************************************
91 * * SCIC SDS IO REQUEST MACROS
92 * **************************************************************************** */
95 * scic_ssp_io_request_get_object_size() -
97 * This macro returns the sizeof memory required to store the an SSP IO
98 * request. This does not include the size of the SGL or SCU Task Context
101 #define scic_ssp_io_request_get_object_size() \
103 sizeof(struct sci_ssp_command_iu) \
104 + sizeof(struct sci_ssp_response_iu) \
108 * scic_sds_ssp_request_get_command_buffer() -
110 * This macro returns the address of the ssp command buffer in the io request
113 #define scic_sds_ssp_request_get_command_buffer(memory) \
114 ((struct sci_ssp_command_iu *)(\
115 ((char *)(memory)) + sizeof(struct scic_sds_request) \
119 * scic_sds_ssp_request_get_response_buffer() -
121 * This macro returns the address of the ssp response buffer in the io request
124 #define scic_sds_ssp_request_get_response_buffer(memory) \
125 ((struct sci_ssp_response_iu *)(\
126 ((char *)(scic_sds_ssp_request_get_command_buffer(memory))) \
127 + sizeof(struct sci_ssp_command_iu) \
131 * scic_sds_ssp_request_get_task_context_buffer() -
133 * This macro returns the address of the task context buffer in the io request
136 #define scic_sds_ssp_request_get_task_context_buffer(memory) \
137 ((struct scu_task_context *)(\
138 ((char *)(scic_sds_ssp_request_get_response_buffer(memory))) \
139 + sizeof(struct sci_ssp_response_iu) \
143 * scic_sds_ssp_request_get_sgl_element_buffer() -
145 * This macro returns the address of the sgl element pairs in the io request
148 #define scic_sds_ssp_request_get_sgl_element_buffer(memory) \
149 ((struct scu_sgl_element_pair *)(\
150 ((char *)(scic_sds_ssp_request_get_task_context_buffer(memory))) \
151 + sizeof(struct scu_task_context) \
156 * scic_ssp_task_request_get_object_size() -
158 * This macro returns the sizeof of memory required to store an SSP Task
159 * request. This does not include the size of the SCU Task Context memory.
161 #define scic_ssp_task_request_get_object_size() \
163 sizeof(struct sci_ssp_task_iu) \
164 + sizeof(struct sci_ssp_response_iu) \
168 * scic_sds_ssp_task_request_get_command_buffer() -
170 * This macro returns the address of the ssp command buffer in the task request
171 * memory. Yes its the same as the above macro except for the name.
173 #define scic_sds_ssp_task_request_get_command_buffer(memory) \
174 ((struct sci_ssp_task_iu *)(\
175 ((char *)(memory)) + sizeof(struct scic_sds_request) \
179 * scic_sds_ssp_task_request_get_response_buffer() -
181 * This macro returns the address of the ssp response buffer in the task
184 #define scic_sds_ssp_task_request_get_response_buffer(memory) \
185 ((struct sci_ssp_response_iu *)(\
186 ((char *)(scic_sds_ssp_task_request_get_command_buffer(memory))) \
187 + sizeof(struct sci_ssp_task_iu) \
191 * scic_sds_ssp_task_request_get_task_context_buffer() -
193 * This macro returns the task context buffer for the SSP task request.
195 #define scic_sds_ssp_task_request_get_task_context_buffer(memory) \
196 ((struct scu_task_context *)(\
197 ((char *)(scic_sds_ssp_task_request_get_response_buffer(memory))) \
198 + sizeof(struct sci_ssp_response_iu) \
204 * ****************************************************************************
205 * * SCIC SDS IO REQUEST PRIVATE METHODS
206 * **************************************************************************** */
211 * This method returns the size required to store an SSP IO request object. u32
/*
 * Total allocation size for an SSP IO request: the base request struct, the
 * command/response IUs (via scic_ssp_io_request_get_object_size()), one SCU
 * task context, and SCU_MAX_SGL_ELEMENT_PAIRS SGL element pairs.
 * NOTE(review): braces were lost in extraction; code kept byte-identical.
 */
213 static u32
scic_sds_ssp_request_get_object_size(void)
215 return sizeof(struct scic_sds_request
)
216 + scic_ssp_io_request_get_object_size()
217 + sizeof(struct scu_task_context
)
219 + sizeof(struct scu_sgl_element_pair
) * SCU_MAX_SGL_ELEMENT_PAIRS
;
223 * This method returns the sgl element pair for the specified sgl_pair index.
224 * @sci_req: This parameter specifies the IO request for which to retrieve
225 * the Scatter-Gather List element pair.
226 * @sgl_pair_index: This parameter specifies the index into the SGL element
227 * pair to be retrieved.
229 * This method returns a pointer to an struct scu_sgl_element_pair.
/*
 * Pairs 0 and 1 are stored inline in the task context (sgl_pair_ab and
 * sgl_pair_cd); pairs >= 2 come from the request's sgl_element_pair_buffer,
 * offset by 2. NOTE(review): the sgl_pair_index parameter declaration and
 * braces were lost in extraction; code kept byte-identical.
 */
231 static struct scu_sgl_element_pair
*scic_sds_request_get_sgl_element_pair(
232 struct scic_sds_request
*sci_req
,
235 struct scu_task_context
*task_context
;
237 task_context
= (struct scu_task_context
*)sci_req
->task_context_buffer
;
239 if (sgl_pair_index
== 0) {
240 return &task_context
->sgl_pair_ab
;
241 } else if (sgl_pair_index
== 1) {
242 return &task_context
->sgl_pair_cd
;
245 return &sci_req
->sgl_element_pair_buffer
[sgl_pair_index
- 2];
249 * This function will build the SGL list for an IO request.
250 * @sci_req: This parameter specifies the IO request for which to build
251 * the Scatter-Gather List.
/*
 * Two visible paths: when task->num_scatter > 0, scatterlist entries are
 * copied into SGL element pairs (A/B slots, chained via next_pair_upper/
 * next_pair_lower split with upper_32_bits/lower_32_bits); otherwise a single
 * buffer is mapped with dma_map_single(), its handle stashed in
 * isci_request->zero_scatter_daddr, and written into pair A with the chain
 * terminated (next_pair_* = 0).
 * NOTE(review): the iteration over the scatterlist (loop header, sg
 * advancement, several arguments) was lost in extraction; the surviving
 * lines are kept byte-identical — do not infer the loop structure from here.
 */
254 void scic_sds_request_build_sgl(struct scic_sds_request
*sds_request
)
256 struct isci_request
*isci_request
= sds_request
->ireq
;
257 struct isci_host
*isci_host
= isci_request
->isci_host
;
258 struct sas_task
*task
= isci_request_access_task(isci_request
);
259 struct scatterlist
*sg
= NULL
;
262 struct scu_sgl_element_pair
*scu_sg
= NULL
;
263 struct scu_sgl_element_pair
*prev_sg
= NULL
;
265 if (task
->num_scatter
> 0) {
269 scu_sg
= scic_sds_request_get_sgl_element_pair(
273 SCU_SGL_COPY(scu_sg
->A
, sg
);
278 SCU_SGL_COPY(scu_sg
->B
, sg
);
281 SCU_SGL_ZERO(scu_sg
->B
);
285 scic_io_request_get_dma_addr(
289 prev_sg
->next_pair_upper
=
290 upper_32_bits(dma_addr
);
291 prev_sg
->next_pair_lower
=
292 lower_32_bits(dma_addr
);
298 } else { /* handle when no sg */
299 scu_sg
= scic_sds_request_get_sgl_element_pair(sds_request
,
302 dma_addr
= dma_map_single(&isci_host
->pdev
->dev
,
304 task
->total_xfer_len
,
307 isci_request
->zero_scatter_daddr
= dma_addr
;
309 scu_sg
->A
.length
= task
->total_xfer_len
;
310 scu_sg
->A
.address_upper
= upper_32_bits(dma_addr
);
311 scu_sg
->A
.address_lower
= lower_32_bits(dma_addr
);
315 scu_sg
->next_pair_upper
= 0;
316 scu_sg
->next_pair_lower
= 0;
321 * This method builds the remainder of the IO request object.
322 * @sci_req: This parameter specifies the request object being constructed.
324 * The scic_sds_general_request_construct() must be called before this call is
/*
 * Assigns the command, response, and SGL sub-buffers carved from the request
 * allocation (see the layout macros above), aligning the SGL buffer to
 * sizeof(struct scu_sgl_element_pair). The task context buffer is only
 * assigned here when no user-supplied tag exists (was_tag_assigned_by_user
 * == false). NOTE(review): extraction dropped lines (alignment argument,
 * braces); code kept byte-identical.
 */
327 static void scic_sds_ssp_io_request_assign_buffers(
328 struct scic_sds_request
*sci_req
)
330 sci_req
->command_buffer
=
331 scic_sds_ssp_request_get_command_buffer(sci_req
);
332 sci_req
->response_buffer
=
333 scic_sds_ssp_request_get_response_buffer(sci_req
);
334 sci_req
->sgl_element_pair_buffer
=
335 scic_sds_ssp_request_get_sgl_element_buffer(sci_req
);
336 sci_req
->sgl_element_pair_buffer
=
337 PTR_ALIGN(sci_req
->sgl_element_pair_buffer
,
338 sizeof(struct scu_sgl_element_pair
));
340 if (sci_req
->was_tag_assigned_by_user
== false) {
341 sci_req
->task_context_buffer
=
342 scic_sds_ssp_request_get_task_context_buffer(sci_req
);
343 sci_req
->task_context_buffer
=
344 PTR_ALIGN(sci_req
->task_context_buffer
,
350 * This method constructs the SSP Command IU data for this io request object.
351 * @sci_req: This parameter specifies the request object for which the SSP
352 * command information unit is being built.
/*
 * Fills the SSP command IU in sds_request->command_buffer: LUN, reserved
 * word 2 zeroed, CDB (byte-swapped via scic_word_copy_with_swap, rounded up
 * to whole u32 words), first-burst disabled, and task priority/attribute
 * from the isci request. CDBs longer than 16 bytes set
 * additional_cdb_length. NOTE(review): extraction dropped lines (cdb_length/
 * cdb_buffer declarations, some call arguments, braces); code kept
 * byte-identical.
 */
355 static void scic_sds_io_request_build_ssp_command_iu(
356 struct scic_sds_request
*sds_request
)
358 struct sci_ssp_command_iu
*command_frame
;
361 struct isci_request
*isci_request
= sds_request
->ireq
;
364 (struct sci_ssp_command_iu
*)sds_request
->command_buffer
;
366 command_frame
->lun_upper
= 0;
367 command_frame
->lun_lower
=
368 isci_request_ssp_io_request_get_lun(isci_request
);
370 ((u32
*)command_frame
)[2] = 0;
372 cdb_length
= isci_request_ssp_io_request_get_cdb_length(isci_request
);
373 cdb_buffer
= (u32
*)isci_request_ssp_io_request_get_cdb_address(
376 if (cdb_length
> 16) {
377 command_frame
->additional_cdb_length
= cdb_length
- 16;
380 /* / @todo Is it ok to leave junk at the end of the cdb buffer? */
381 scic_word_copy_with_swap(
382 (u32
*)(&command_frame
->cdb
),
384 (cdb_length
+ 3) / sizeof(u32
)
387 command_frame
->enable_first_burst
= 0;
388 command_frame
->task_priority
=
389 isci_request_ssp_io_request_get_command_priority(isci_request
);
390 command_frame
->task_attribute
=
391 isci_request_ssp_io_request_get_task_attribute(isci_request
);
396 * This method constructs the SSP Task IU data for this io request object.
/*
 * Fills the SSP task management IU in sds_request->command_buffer: LUN,
 * reserved word 2 zeroed, task function, and the io tag of the request to
 * be managed. NOTE(review): extraction dropped lines (some call arguments,
 * braces); code kept byte-identical.
 */
400 static void scic_sds_task_request_build_ssp_task_iu(
401 struct scic_sds_request
*sds_request
)
403 struct sci_ssp_task_iu
*command_frame
;
404 struct isci_request
*isci_request
= sds_request
->ireq
;
407 (struct sci_ssp_task_iu
*)sds_request
->command_buffer
;
409 command_frame
->lun_upper
= 0;
410 command_frame
->lun_lower
= isci_request_ssp_io_request_get_lun(
413 ((u32
*)command_frame
)[2] = 0;
415 command_frame
->task_function
=
416 isci_task_ssp_request_get_function(isci_request
);
417 command_frame
->task_tag
=
418 isci_task_ssp_request_get_io_tag_to_manage(
424 * This method will fill in the SCU Task Context for any type of SSP request.
/*
 * Common SCU task context setup shared by SSP IO and SSP task requests:
 * protocol/port/engine routing fields, remote node index, fixed control
 * bits, the post_context command word (built differently depending on
 * whether the user pre-assigned the io tag — the tag index is OR-ed in only
 * in the user-assigned branch), and the DMA addresses of the command and
 * response IUs split into upper/lower 32-bit halves.
 * NOTE(review): extraction dropped lines (dma_addr declaration, an argument
 * to one protocol-engine-group call, braces); code kept byte-identical.
 * Function name typo "reqeust" is preserved — callers use it.
 */
429 static void scu_ssp_reqeust_construct_task_context(
430 struct scic_sds_request
*sds_request
,
431 struct scu_task_context
*task_context
)
434 struct scic_sds_controller
*controller
;
435 struct scic_sds_remote_device
*target_device
;
436 struct scic_sds_port
*target_port
;
438 controller
= scic_sds_request_get_controller(sds_request
);
439 target_device
= scic_sds_request_get_device(sds_request
);
440 target_port
= scic_sds_request_get_port(sds_request
);
442 /* Fill in the TC with its required data */
443 task_context
->abort
= 0;
444 task_context
->priority
= 0;
445 task_context
->initiator_request
= 1;
446 task_context
->connection_rate
=
447 scic_remote_device_get_connection_rate(target_device
);
448 task_context
->protocol_engine_index
=
449 scic_sds_controller_get_protocol_engine_group(controller
);
450 task_context
->logical_port_index
=
451 scic_sds_port_get_index(target_port
);
452 task_context
->protocol_type
= SCU_TASK_CONTEXT_PROTOCOL_SSP
;
453 task_context
->valid
= SCU_TASK_CONTEXT_VALID
;
454 task_context
->context_type
= SCU_TASK_CONTEXT_TYPE
;
456 task_context
->remote_node_index
=
457 scic_sds_remote_device_get_index(sds_request
->target_device
);
458 task_context
->command_code
= 0;
460 task_context
->link_layer_control
= 0;
461 task_context
->do_not_dma_ssp_good_response
= 1;
462 task_context
->strict_ordering
= 0;
463 task_context
->control_frame
= 0;
464 task_context
->timeout_enable
= 0;
465 task_context
->block_guard_enable
= 0;
467 task_context
->address_modifier
= 0;
469 /* task_context->type.ssp.tag = sci_req->io_tag; */
470 task_context
->task_phase
= 0x01;
472 if (sds_request
->was_tag_assigned_by_user
) {
474 * Build the task context now since we have already read
477 sds_request
->post_context
=
478 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
|
479 (scic_sds_controller_get_protocol_engine_group(
481 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT
) |
482 (scic_sds_port_get_index(target_port
) <<
483 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT
) |
484 scic_sds_io_tag_get_index(sds_request
->io_tag
));
487 * Build the task context now since we have already read
490 * I/O tag index is not assigned because we have to wait
493 sds_request
->post_context
=
494 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
|
495 (scic_sds_controller_get_protocol_engine_group(
496 owning_controller
) <<
497 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT
) |
498 (scic_sds_port_get_index(target_port
) <<
499 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT
));
503 * Copy the physical address for the command buffer to the
506 dma_addr
= scic_io_request_get_dma_addr(sds_request
,
507 sds_request
->command_buffer
);
509 task_context
->command_iu_upper
= upper_32_bits(dma_addr
);
510 task_context
->command_iu_lower
= lower_32_bits(dma_addr
);
513 * Copy the physical address for the response buffer to the
516 dma_addr
= scic_io_request_get_dma_addr(sds_request
,
517 sds_request
->response_buffer
);
519 task_context
->response_iu_upper
= upper_32_bits(dma_addr
);
520 task_context
->response_iu_lower
= lower_32_bits(dma_addr
);
524 * This method will fill in the SCU Task Context for a SSP IO request.
/*
 * SSP IO-specific task context setup on top of the common construction:
 * command IU length (in u32 words), COMMAND frame type, task type chosen
 * from the DMA direction (IOREAD for DMA_FROM_DEVICE, IOWRITE otherwise),
 * transfer length, and the SGL is built only when there is data to move.
 * NOTE(review): extraction dropped lines (len parameter, switch header,
 * braces); code kept byte-identical.
 */
528 static void scu_ssp_io_request_construct_task_context(
529 struct scic_sds_request
*sci_req
,
530 enum dma_data_direction dir
,
533 struct scu_task_context
*task_context
;
535 task_context
= scic_sds_request_get_task_context(sci_req
);
537 scu_ssp_reqeust_construct_task_context(sci_req
, task_context
);
539 task_context
->ssp_command_iu_length
= sizeof(struct sci_ssp_command_iu
) / sizeof(u32
);
540 task_context
->type
.ssp
.frame_type
= SCI_SAS_COMMAND_FRAME
;
543 case DMA_FROM_DEVICE
:
546 task_context
->task_type
= SCU_TASK_TYPE_IOREAD
;
549 task_context
->task_type
= SCU_TASK_TYPE_IOWRITE
;
553 task_context
->transfer_length_bytes
= len
;
555 if (task_context
->transfer_length_bytes
> 0)
556 scic_sds_request_build_sgl(sci_req
);
561 * This method will fill in the remainder of the io request object for SSP Task
/*
 * Task-request variant of buffer assignment: command/response buffers from
 * the task-request layout macros, no SGL buffer (NULL), and a cache-aligned
 * task context buffer only when no user-supplied tag exists.
 * NOTE(review): braces lost in extraction; code kept byte-identical.
 */
566 static void scic_sds_ssp_task_request_assign_buffers(
567 struct scic_sds_request
*sci_req
)
569 /* Assign all of the buffer pointers */
570 sci_req
->command_buffer
=
571 scic_sds_ssp_task_request_get_command_buffer(sci_req
);
572 sci_req
->response_buffer
=
573 scic_sds_ssp_task_request_get_response_buffer(sci_req
);
574 sci_req
->sgl_element_pair_buffer
= NULL
;
576 if (sci_req
->was_tag_assigned_by_user
== false) {
577 sci_req
->task_context_buffer
=
578 scic_sds_ssp_task_request_get_task_context_buffer(sci_req
);
579 sci_req
->task_context_buffer
=
580 PTR_ALIGN(sci_req
->task_context_buffer
, SMP_CACHE_BYTES
);
585 * This method will fill in the SCU Task Context for a SSP Task request. The
586 * following important settings are utilized: -# priority ==
587 * SCU_TASK_PRIORITY_HIGH. This ensures that the task request is issued
588 * ahead of other task destined for the same Remote Node. -# task_type ==
589 * SCU_TASK_TYPE_IOREAD. This simply indicates that a normal request type
590 * (i.e. non-raw frame) is being utilized to perform task management. -#
591 * control_frame == 1. This ensures that the proper endianness is set so
592 * that the bytes are transmitted in the right order for a task frame.
593 * @sci_req: This parameter specifies the task request object being
/* NOTE(review): the code below sets task_type = SCU_TASK_TYPE_RAW_FRAME,
 * which contradicts the SCU_TASK_TYPE_IOREAD claim in the comment above —
 * the code is taken as authoritative; confirm against the hardware spec. */
597 static void scu_ssp_task_request_construct_task_context(
598 struct scic_sds_request
*sci_req
)
600 struct scu_task_context
*task_context
;
602 task_context
= scic_sds_request_get_task_context(sci_req
);
604 scu_ssp_reqeust_construct_task_context(sci_req
, task_context
);
606 task_context
->control_frame
= 1;
607 task_context
->priority
= SCU_TASK_PRIORITY_HIGH
;
608 task_context
->task_type
= SCU_TASK_TYPE_RAW_FRAME
;
609 task_context
->transfer_length_bytes
= 0;
610 task_context
->type
.ssp
.frame_type
= SCI_SAS_TASK_FRAME
;
611 task_context
->ssp_command_iu_length
= sizeof(struct sci_ssp_task_iu
) / sizeof(u32
);
616 * This method constructs the SSP Command IU data for this ssp passthrough
617 * command request object.
618 * @sci_req: This parameter specifies the request object for which the SSP
619 * command information unit is being built.
621 * enum sci_status, returns invalid parameter if cdb > 16
626 * This method constructs the SATA request object.
/*
 * Dispatches SATA request construction on the SAT protocol: PIO, UDMA,
 * soft/hard reset, non-data, and FPDMA (NCQ) each go to their dedicated
 * STP constructor; the remaining protocols fall through to a dev_err and
 * SCI_FAILURE. NOTE(review): extraction dropped lines (proto/len/copy
 * parameters, the switch header, break statements, braces); code kept
 * byte-identical.
 */
635 static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request
*sci_req
,
637 enum dma_data_direction dir
,
640 enum sci_status status
= SCI_SUCCESS
;
643 case SAT_PROTOCOL_PIO_DATA_IN
:
644 case SAT_PROTOCOL_PIO_DATA_OUT
:
645 status
= scic_sds_stp_pio_request_construct(sci_req
, proto
, copy
);
648 case SAT_PROTOCOL_UDMA_DATA_IN
:
649 case SAT_PROTOCOL_UDMA_DATA_OUT
:
650 status
= scic_sds_stp_udma_request_construct(sci_req
, len
, dir
);
653 case SAT_PROTOCOL_ATA_HARD_RESET
:
654 case SAT_PROTOCOL_SOFT_RESET
:
655 status
= scic_sds_stp_soft_reset_request_construct(sci_req
);
658 case SAT_PROTOCOL_NON_DATA
:
659 status
= scic_sds_stp_non_data_request_construct(sci_req
);
662 case SAT_PROTOCOL_FPDMA
:
663 status
= scic_sds_stp_ncq_request_construct(sci_req
, len
, dir
);
666 case SAT_PROTOCOL_DMA_QUEUED
:
667 case SAT_PROTOCOL_DMA
:
668 case SAT_PROTOCOL_DEVICE_DIAGNOSTIC
:
669 case SAT_PROTOCOL_DEVICE_RESET
:
670 case SAT_PROTOCOL_RETURN_RESPONSE_INFO
:
672 dev_err(scic_to_dev(sci_req
->owning_controller
),
673 "%s: SCIC IO Request 0x%p received un-handled "
675 __func__
, sci_req
, proto
);
677 status
= SCI_FAILURE
;
/*
 * Returns the worst-case request object size: the max over the SSP, STP,
 * and SMP per-protocol object sizes, so one allocation fits any protocol.
 * NOTE(review): braces lost in extraction; code kept byte-identical.
 */
684 u32
scic_io_request_get_object_size(void)
686 u32 ssp_request_size
;
687 u32 stp_request_size
;
688 u32 smp_request_size
;
690 ssp_request_size
= scic_sds_ssp_request_get_object_size();
691 stp_request_size
= scic_sds_stp_request_get_object_size();
692 smp_request_size
= scic_sds_smp_request_get_object_size();
694 return max(ssp_request_size
, max(stp_request_size
, smp_request_size
));
/*
 * Builds a basic SSP IO request: marks the protocol, constructs the task
 * context from the isci request's direction and transfer length, builds the
 * SSP command IU, then moves the request state machine to CONSTRUCTED.
 * NOTE(review): extraction dropped lines (first call argument, return,
 * braces); code kept byte-identical.
 */
697 enum sci_status
scic_io_request_construct_basic_ssp(
698 struct scic_sds_request
*sci_req
)
700 struct isci_request
*isci_request
= sci_req
->ireq
;
702 sci_req
->protocol
= SCIC_SSP_PROTOCOL
;
704 scu_ssp_io_request_construct_task_context(
706 isci_request_io_request_get_data_direction(isci_request
),
707 isci_request_io_request_get_transfer_length(isci_request
));
709 scic_sds_io_request_build_ssp_command_iu(sci_req
);
711 sci_base_state_machine_change_state(&sci_req
->state_machine
,
712 SCI_BASE_REQUEST_STATE_CONSTRUCTED
);
/*
 * Builds an SSP task management request: task-specific task context, SSP
 * task IU, then transition to CONSTRUCTED.
 * NOTE(review): return statement and braces lost in extraction; code kept
 * byte-identical.
 */
718 enum sci_status
scic_task_request_construct_ssp(
719 struct scic_sds_request
*sci_req
)
721 /* Construct the SSP Task SCU Task Context */
722 scu_ssp_task_request_construct_task_context(sci_req
);
724 /* Fill in the SSP Task IU */
725 scic_sds_task_request_build_ssp_task_iu(sci_req
);
727 sci_base_state_machine_change_state(&sci_req
->state_machine
,
728 SCI_BASE_REQUEST_STATE_CONSTRUCTED
);
/*
 * Builds a basic SATA (STP) IO request: gathers length/direction/SAT
 * protocol from the isci request, sets copy when the task actually moves
 * data (data_dir != DMA_NONE), delegates to the SAT-protocol dispatcher,
 * and on SCI_SUCCESS moves the state machine to CONSTRUCTED.
 * NOTE(review): extraction dropped lines (len/proto/copy declarations,
 * return, braces); code kept byte-identical.
 */
734 enum sci_status
scic_io_request_construct_basic_sata(
735 struct scic_sds_request
*sci_req
)
737 enum sci_status status
;
738 struct scic_sds_stp_request
*stp_req
;
741 enum dma_data_direction dir
;
743 struct isci_request
*isci_request
= sci_req
->ireq
;
744 struct sas_task
*task
= isci_request_access_task(isci_request
);
746 stp_req
= container_of(sci_req
, typeof(*stp_req
), parent
);
748 sci_req
->protocol
= SCIC_STP_PROTOCOL
;
750 len
= isci_request_io_request_get_transfer_length(isci_request
);
751 dir
= isci_request_io_request_get_data_direction(isci_request
);
752 proto
= isci_sata_get_sat_protocol(isci_request
);
753 copy
= (task
->data_dir
== DMA_NONE
) ? false : true;
755 status
= scic_io_request_construct_sata(sci_req
, proto
, len
, dir
, copy
);
757 if (status
== SCI_SUCCESS
)
758 sci_base_state_machine_change_state(&sci_req
->state_machine
,
759 SCI_BASE_REQUEST_STATE_CONSTRUCTED
);
/*
 * Builds a SATA task management request. Only hard/soft reset SAT protocols
 * are supported (routed to the soft-reset constructor); anything else logs
 * a dev_err and fails. On success the state machine moves to CONSTRUCTED.
 * NOTE(review): extraction dropped lines (sat_protocol declaration, default
 * label, return, braces); code kept byte-identical.
 */
765 enum sci_status
scic_task_request_construct_sata(
766 struct scic_sds_request
*sci_req
)
768 enum sci_status status
;
770 struct isci_request
*isci_request
= sci_req
->ireq
;
772 sat_protocol
= isci_sata_get_sat_protocol(isci_request
);
774 switch (sat_protocol
) {
775 case SAT_PROTOCOL_ATA_HARD_RESET
:
776 case SAT_PROTOCOL_SOFT_RESET
:
777 status
= scic_sds_stp_soft_reset_request_construct(sci_req
);
781 dev_err(scic_to_dev(sci_req
->owning_controller
),
782 "%s: SCIC IO Request 0x%p received un-handled SAT "
788 status
= SCI_FAILURE
;
792 if (status
== SCI_SUCCESS
)
793 sci_base_state_machine_change_state(&sci_req
->state_machine
,
794 SCI_BASE_REQUEST_STATE_CONSTRUCTED
);
/* Returns the io tag assigned to the request. */
800 u16
scic_io_request_get_io_tag(
801 struct scic_sds_request
*sci_req
)
803 return sci_req
->io_tag
;
/* Returns the SCU hardware status recorded for the request. */
807 u32
scic_request_get_controller_status(
808 struct scic_sds_request
*sci_req
)
810 return sci_req
->scu_status
;
/* Returns the request's command IU buffer address. */
814 void *scic_io_request_get_command_iu_address(
815 struct scic_sds_request
*sci_req
)
817 return sci_req
->command_buffer
;
/* Returns the request's response IU buffer address. */
821 void *scic_io_request_get_response_iu_address(
822 struct scic_sds_request
*sci_req
)
824 return sci_req
->response_buffer
;
828 #define SCU_TASK_CONTEXT_SRAM 0x200000
/*
 * Reads the bytes-transferred count for a request directly from the task
 * context SRAM window in BAR1 (see the inline address derivation below),
 * indexed by the request's io tag, but only when the SMU address modifier
 * is zero. NOTE(review): ret_val declaration, the else path, and the return
 * were lost in extraction; code kept byte-identical.
 */
829 u32
scic_io_request_get_number_of_bytes_transferred(
830 struct scic_sds_request
*scic_sds_request
)
832 struct scic_sds_controller
*scic
= scic_sds_request
->owning_controller
;
835 if (readl(&scic
->smu_registers
->address_modifier
) == 0) {
836 void __iomem
*scu_reg_base
= scic
->scu_registers
;
838 * get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
839 * BAR1 is the scu_registers
840 * 0x20002C = 0x200000 + 0x2c
841 * = start of task context SRAM + offset of (type.ssp.data_offset)
842 * TCi is the io_tag of struct scic_sds_request */
843 ret_val
= readl(scu_reg_base
+
844 (SCU_TASK_CONTEXT_SRAM
+ offsetof(struct scu_task_context
, type
.ssp
.data_offset
)) +
845 ((sizeof(struct scu_task_context
)) * scic_sds_io_tag_get_index(scic_sds_request
->io_tag
)));
853 * ****************************************************************************
854 * * SCIC SDS Interface Implementation
855 * **************************************************************************** */
/*
 * Starts a request: rejects it if the remote device sequence changed since
 * construction, otherwise delegates to the current state's start_handler;
 * with no handler installed it warns and returns
 * SCI_FAILURE_INVALID_STATE. NOTE(review): return type, the early-return
 * status, and braces were lost in extraction; code kept byte-identical.
 */
858 scic_sds_request_start(struct scic_sds_request
*request
)
860 if (request
->device_sequence
!=
861 scic_sds_remote_device_get_sequence(request
->target_device
))
864 if (request
->state_handlers
->start_handler
)
865 return request
->state_handlers
->start_handler(request
);
867 dev_warn(scic_to_dev(request
->owning_controller
),
868 "%s: SCIC IO Request requested to start while in wrong "
871 sci_base_state_machine_get_state(&request
->state_machine
));
873 return SCI_FAILURE_INVALID_STATE
;
/*
 * Terminates a request via the current state's abort_handler; warns and
 * returns SCI_FAILURE_INVALID_STATE when no handler is installed.
 * NOTE(review): return type and braces lost in extraction; code kept
 * byte-identical.
 */
877 scic_sds_io_request_terminate(struct scic_sds_request
*request
)
879 if (request
->state_handlers
->abort_handler
)
880 return request
->state_handlers
->abort_handler(request
);
882 dev_warn(scic_to_dev(request
->owning_controller
),
883 "%s: SCIC IO Request requested to abort while in wrong "
886 sci_base_state_machine_get_state(&request
->state_machine
));
888 return SCI_FAILURE_INVALID_STATE
;
/*
 * Completes a request via the current state's complete_handler; warns and
 * returns SCI_FAILURE_INVALID_STATE when no handler is installed.
 * NOTE(review): return type and braces lost in extraction; code kept
 * byte-identical.
 */
892 scic_sds_io_request_complete(struct scic_sds_request
*request
)
894 if (request
->state_handlers
->complete_handler
)
895 return request
->state_handlers
->complete_handler(request
);
897 dev_warn(scic_to_dev(request
->owning_controller
),
898 "%s: SCIC IO Request requested to complete while in wrong "
901 sci_base_state_machine_get_state(&request
->state_machine
));
903 return SCI_FAILURE_INVALID_STATE
;
/*
 * Forwards a hardware event code to the current state's event_handler;
 * warns and returns SCI_FAILURE_INVALID_STATE when no handler is installed.
 * NOTE(review): the event_code parameter line and braces were lost in
 * extraction; code kept byte-identical.
 */
906 enum sci_status
scic_sds_io_request_event_handler(
907 struct scic_sds_request
*request
,
910 if (request
->state_handlers
->event_handler
)
911 return request
->state_handlers
->event_handler(request
, event_code
);
913 dev_warn(scic_to_dev(request
->owning_controller
),
914 "%s: SCIC IO Request given event code notification %x while "
915 "in wrong state %d\n",
918 sci_base_state_machine_get_state(&request
->state_machine
));
920 return SCI_FAILURE_INVALID_STATE
;
/*
 * Routes a task context completion. A STARTED request with no running
 * substate machine short-circuits to the common started-state TC
 * completion handler; otherwise the current state's tc_completion_handler
 * runs. With neither, warn and return SCI_FAILURE_INVALID_STATE.
 * NOTE(review): return type and braces lost in extraction; code kept
 * byte-identical.
 */
924 scic_sds_io_request_tc_completion(struct scic_sds_request
*request
, u32 completion_code
)
926 if (request
->state_machine
.current_state_id
== SCI_BASE_REQUEST_STATE_STARTED
&&
927 request
->has_started_substate_machine
== false)
928 return scic_sds_request_started_state_tc_completion_handler(request
, completion_code
);
929 else if (request
->state_handlers
->tc_completion_handler
)
930 return request
->state_handlers
->tc_completion_handler(request
, completion_code
);
932 dev_warn(scic_to_dev(request
->owning_controller
),
933 "%s: SCIC IO Request given task completion notification %x "
934 "while in wrong state %d\n",
937 sci_base_state_machine_get_state(&request
->state_machine
));
939 return SCI_FAILURE_INVALID_STATE
;
946 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the start
947 * operation is to be executed.
948 * @frame_index: The frame index returned by the hardware for the request
951 * This method invokes the core state frame handler for the
952 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
/*
 * When the current state has no frame_handler, the unclaimed frame is
 * released back to the controller before returning INVALID_STATE — this
 * avoids leaking the hardware frame buffer.
 * NOTE(review): the frame_index parameter line and braces were lost in
 * extraction; code kept byte-identical.
 */
954 enum sci_status
scic_sds_io_request_frame_handler(
955 struct scic_sds_request
*request
,
958 if (request
->state_handlers
->frame_handler
)
959 return request
->state_handlers
->frame_handler(request
, frame_index
);
961 dev_warn(scic_to_dev(request
->owning_controller
),
962 "%s: SCIC IO Request given unexpected frame %x while in "
966 sci_base_state_machine_get_state(&request
->state_machine
));
968 scic_sds_controller_release_frame(request
->owning_controller
, frame_index
);
969 return SCI_FAILURE_INVALID_STATE
;
974 * @sci_req: The SCIC_SDS_IO_REQUEST_T object for which the task start
975 * operation is to be executed.
977 * This method invokes the core state task complete handler for the
978 * SCIC_SDS_IO_REQUEST_T object. enum sci_status
982 * ****************************************************************************
983 * * SCIC SDS PROTECTED METHODS
984 * **************************************************************************** */
987 * This method copies response data for requests returning response data
988 * instead of sense data.
989 * @sci_req: This parameter specifies the request object for which to copy
/*
 * Copies the SSP response IU payload into the user's response buffer,
 * clamping the copy length to min(user buffer length, length reported in
 * the response IU) so neither side can be overrun.
 * NOTE(review): extraction dropped lines (assignment targets for the
 * address lookup, call arguments, braces); code kept byte-identical.
 */
993 void scic_sds_io_request_copy_response(struct scic_sds_request
*sds_request
)
995 void *response_buffer
;
996 u32 user_response_length
;
997 u32 core_response_length
;
998 struct sci_ssp_response_iu
*ssp_response
;
999 struct isci_request
*isci_request
= sds_request
->ireq
;
1002 (struct sci_ssp_response_iu
*)sds_request
->response_buffer
;
1005 isci_task_ssp_request_get_response_data_address(
1008 user_response_length
=
1009 isci_task_ssp_request_get_response_data_length(
1012 core_response_length
= sci_ssp_get_response_data_length(
1013 ssp_response
->response_data_length
);
1015 user_response_length
= min(user_response_length
, core_response_length
);
1017 memcpy(response_buffer
, ssp_response
->data
, user_response_length
);
1021 * *****************************************************************************
1022 * * CONSTRUCTED STATE HANDLERS
1023 * ***************************************************************************** */
1026 * This method implements the action taken when a constructed
1027 * SCIC_SDS_IO_REQUEST_T object receives a scic_sds_request_start() request.
1028 * This method will, if necessary, allocate a TCi for the io request object and
1029 * then will, if necessary, copy the constructed TC data into the actual TC
1030 * buffer. If everything is successful the post context field is updated with
1031 * the TCi so the controller can post the request to the hardware. enum sci_status
1032 * SCI_SUCCESS SCI_FAILURE_INSUFFICIENT_RESOURCES
/*
 * NOTE(review): extraction dropped lines here (tag-allocation assignment,
 * break statements, the TC copy for driver-assigned tags, the SCI_SUCCESS
 * return, braces); surviving code kept byte-identical. Visible flow:
 * allocate an io tag if the request has none, stamp the tag into the task
 * context (SSP/SMP also get target_port_transfer_tag = 0xFFFF), copy the
 * locally-built TC into the hardware TC when the driver owns the tag,
 * OR the tag index into post_context, and transition to STARTED; with no
 * tag available, SCI_FAILURE_INSUFFICIENT_RESOURCES is returned.
 */
1034 static enum sci_status
scic_sds_request_constructed_state_start_handler(
1035 struct scic_sds_request
*request
)
1037 struct scu_task_context
*task_context
;
1039 if (request
->io_tag
== SCI_CONTROLLER_INVALID_IO_TAG
) {
1041 scic_controller_allocate_io_tag(request
->owning_controller
);
1044 /* Record the IO Tag in the request */
1045 if (request
->io_tag
!= SCI_CONTROLLER_INVALID_IO_TAG
) {
1046 task_context
= request
->task_context_buffer
;
1048 task_context
->task_index
= scic_sds_io_tag_get_index(request
->io_tag
);
1050 switch (task_context
->protocol_type
) {
1051 case SCU_TASK_CONTEXT_PROTOCOL_SMP
:
1052 case SCU_TASK_CONTEXT_PROTOCOL_SSP
:
1054 task_context
->type
.ssp
.tag
= request
->io_tag
;
1055 task_context
->type
.ssp
.target_port_transfer_tag
= 0xFFFF;
1058 case SCU_TASK_CONTEXT_PROTOCOL_STP
:
1061 * task_context->type.stp.ncq_tag = request->ncq_tag; */
1064 case SCU_TASK_CONTEXT_PROTOCOL_NONE
:
1065 /* / @todo When do we set no protocol type? */
1069 /* This should never happen since we build the IO requests */
1074 * Check to see if we need to copy the task context buffer
1075 * or have been building into the task context buffer */
1076 if (request
->was_tag_assigned_by_user
== false) {
1077 scic_sds_controller_copy_task_context(
1078 request
->owning_controller
, request
);
1081 /* Add to the post_context the io tag value */
1082 request
->post_context
|= scic_sds_io_tag_get_index(request
->io_tag
);
1084 /* Everything is good go ahead and change state */
1085 sci_base_state_machine_change_state(&request
->state_machine
,
1086 SCI_BASE_REQUEST_STATE_STARTED
);
1091 return SCI_FAILURE_INSUFFICIENT_RESOURCES
;
1095 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1096 * object receives a scic_sds_request_terminate() request. Since the request
1097 * has not yet been posted to the hardware the request transitions to the
1098 * completed state. enum sci_status SCI_SUCCESS
/* NOTE(review): return statement and braces lost in extraction; code kept
 * byte-identical. Sets the terminated status pair then completes. */
1100 static enum sci_status
scic_sds_request_constructed_state_abort_handler(
1101 struct scic_sds_request
*request
)
1104 * This request has been terminated by the user make sure that the correct
1105 * status code is returned */
1106 scic_sds_request_set_status(request
,
1107 SCU_TASK_DONE_TASK_ABORT
,
1108 SCI_FAILURE_IO_TERMINATED
);
1110 sci_base_state_machine_change_state(&request
->state_machine
,
1111 SCI_BASE_REQUEST_STATE_COMPLETED
);
1116 * *****************************************************************************
1117 * * STARTED STATE HANDLERS
1118 * ***************************************************************************** */
1121 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1122 * object receives a scic_sds_request_terminate() request. Since the request
1123 * has been posted to the hardware the io request state is changed to the
1124 * aborting state. enum sci_status SCI_SUCCESS
1126 enum sci_status
scic_sds_request_started_state_abort_handler(
1127 struct scic_sds_request
*request
)
1129 if (request
->has_started_substate_machine
)
1130 sci_base_state_machine_stop(&request
->started_substate_machine
);
1132 sci_base_state_machine_change_state(&request
->state_machine
,
1133 SCI_BASE_REQUEST_STATE_ABORTING
);
1138 * scic_sds_request_started_state_tc_completion_handler() - This method process
1139 * TC (task context) completions for normal IO request (i.e. Task/Abort
1140 * Completions of type 0). This method will update the
1141 * SCIC_SDS_IO_REQUEST_T::status field.
1142 * @sci_req: This parameter specifies the request for which a completion
1144 * @completion_code: This parameter specifies the completion code received from
1148 enum sci_status
scic_sds_request_started_state_tc_completion_handler(
1149 struct scic_sds_request
*sci_req
,
1150 u32 completion_code
)
1153 struct sci_ssp_response_iu
*response_buffer
;
1156 * @todo Any SDMA return code of other than 0 is bad
1157 * decode 0x003C0000 to determine SDMA status
1159 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
1160 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD
):
1161 scic_sds_request_set_status(
1162 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
1166 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP
):
1169 * There are times when the SCU hardware will return an early response
1170 * because the io request specified more data than is returned by the
1171 * target device (mode pages, inquiry data, etc.). We must check the
1172 * response stats to see if this is truly a failed request or a good
1173 * request that just got completed early. */
1174 struct sci_ssp_response_iu
*response
= (struct sci_ssp_response_iu
*)
1175 sci_req
->response_buffer
;
1176 scic_word_copy_with_swap(
1177 sci_req
->response_buffer
,
1178 sci_req
->response_buffer
,
1179 sizeof(struct sci_ssp_response_iu
) / sizeof(u32
)
1182 if (response
->status
== 0) {
1183 scic_sds_request_set_status(
1184 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS_IO_DONE_EARLY
1187 scic_sds_request_set_status(
1189 SCU_TASK_DONE_CHECK_RESPONSE
,
1190 SCI_FAILURE_IO_RESPONSE_VALID
1196 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE
):
1197 scic_word_copy_with_swap(
1198 sci_req
->response_buffer
,
1199 sci_req
->response_buffer
,
1200 sizeof(struct sci_ssp_response_iu
) / sizeof(u32
)
1203 scic_sds_request_set_status(
1205 SCU_TASK_DONE_CHECK_RESPONSE
,
1206 SCI_FAILURE_IO_RESPONSE_VALID
1210 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR
):
1212 * / @todo With TASK_DONE_RESP_LEN_ERR is the response frame guaranteed
1213 * / to be received before this completion status is posted? */
1215 (struct sci_ssp_response_iu
*)sci_req
->response_buffer
;
1217 response_buffer
->data_present
& SCI_SSP_RESPONSE_IU_DATA_PRESENT_MASK
;
1219 if ((data_present
== 0x01) || (data_present
== 0x02)) {
1220 scic_sds_request_set_status(
1222 SCU_TASK_DONE_CHECK_RESPONSE
,
1223 SCI_FAILURE_IO_RESPONSE_VALID
1226 scic_sds_request_set_status(
1227 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
1232 /* only stp device gets suspended. */
1233 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO
):
1234 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR
):
1235 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR
):
1236 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR
):
1237 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR
):
1238 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN
):
1239 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR
):
1240 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP
):
1241 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS
):
1242 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR
):
1243 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR
):
1244 if (sci_req
->protocol
== SCIC_STP_PROTOCOL
) {
1245 scic_sds_request_set_status(
1247 SCU_GET_COMPLETION_TL_STATUS(completion_code
) >> SCU_COMPLETION_TL_STATUS_SHIFT
,
1248 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED
1251 scic_sds_request_set_status(
1253 SCU_GET_COMPLETION_TL_STATUS(completion_code
) >> SCU_COMPLETION_TL_STATUS_SHIFT
,
1254 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1259 /* both stp/ssp device gets suspended */
1260 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR
):
1261 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION
):
1262 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1
):
1263 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2
):
1264 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3
):
1265 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION
):
1266 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION
):
1267 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY
):
1268 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED
):
1269 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED
):
1270 scic_sds_request_set_status(
1272 SCU_GET_COMPLETION_TL_STATUS(completion_code
) >> SCU_COMPLETION_TL_STATUS_SHIFT
,
1273 SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED
1277 /* neither ssp nor stp gets suspended. */
1278 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR
):
1279 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR
):
1280 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR
):
1281 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR
):
1282 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR
):
1283 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA
):
1284 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR
):
1285 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR
):
1286 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR
):
1287 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR
):
1288 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA
):
1289 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL
):
1290 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV
):
1291 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV
):
1292 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND
):
1294 scic_sds_request_set_status(
1296 SCU_GET_COMPLETION_TL_STATUS(completion_code
) >> SCU_COMPLETION_TL_STATUS_SHIFT
,
1297 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1303 * @todo This is probably wrong for ACK/NAK timeout conditions
1306 /* In all cases we will treat this as the completion of the IO request. */
1307 sci_base_state_machine_change_state(&sci_req
->state_machine
,
1308 SCI_BASE_REQUEST_STATE_COMPLETED
);
1313 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1314 * object receives a scic_sds_request_frame_handler() request. This method
1315 * first determines the frame type received. If this is a response frame then
1316 * the response data is copied to the io request response buffer for processing
1317 * at completion time. If the frame type is not a response buffer an error is
1318 * logged. enum sci_status SCI_SUCCESS SCI_FAILURE_INVALID_PARAMETER_VALUE
1320 static enum sci_status
scic_sds_request_started_state_frame_handler(
1321 struct scic_sds_request
*sci_req
,
1324 enum sci_status status
;
1325 struct sci_ssp_frame_header
*frame_header
;
1327 /* / @todo If this is a response frame we must record that we received it */
1328 status
= scic_sds_unsolicited_frame_control_get_header(
1329 &(scic_sds_request_get_controller(sci_req
)->uf_control
),
1331 (void **)&frame_header
1334 if (frame_header
->frame_type
== SCI_SAS_RESPONSE_FRAME
) {
1335 struct sci_ssp_response_iu
*response_buffer
;
1337 status
= scic_sds_unsolicited_frame_control_get_buffer(
1338 &(scic_sds_request_get_controller(sci_req
)->uf_control
),
1340 (void **)&response_buffer
1343 scic_word_copy_with_swap(
1344 sci_req
->response_buffer
,
1345 (u32
*)response_buffer
,
1346 sizeof(struct sci_ssp_response_iu
)
1349 response_buffer
= (struct sci_ssp_response_iu
*)sci_req
->response_buffer
;
1351 if ((response_buffer
->data_present
== 0x01) ||
1352 (response_buffer
->data_present
== 0x02)) {
1353 scic_sds_request_set_status(
1355 SCU_TASK_DONE_CHECK_RESPONSE
,
1356 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1359 scic_sds_request_set_status(
1360 sci_req
, SCU_TASK_DONE_GOOD
, SCI_SUCCESS
1363 /* This was not a response frame why did it get forwarded? */
1364 dev_err(scic_to_dev(sci_req
->owning_controller
),
1365 "%s: SCIC IO Request 0x%p received unexpected "
1366 "frame %d type 0x%02x\n",
1370 frame_header
->frame_type
);
1373 * In any case we are done with this frame buffer return it to the
1375 scic_sds_controller_release_frame(
1376 sci_req
->owning_controller
, frame_index
1383 * *****************************************************************************
1384 * * COMPLETED STATE HANDLERS
1385 * ***************************************************************************** */
1389 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1390 * object receives a scic_sds_request_complete() request. This method frees up
1391 * any io request resources that have been allocated and transitions the
1392 * request to its final state. Consider stopping the state machine instead of
1393 * transitioning to the final state? enum sci_status SCI_SUCCESS
1395 static enum sci_status
scic_sds_request_completed_state_complete_handler(
1396 struct scic_sds_request
*request
)
1398 if (request
->was_tag_assigned_by_user
!= true) {
1399 scic_controller_free_io_tag(
1400 request
->owning_controller
, request
->io_tag
);
1403 if (request
->saved_rx_frame_index
!= SCU_INVALID_FRAME_INDEX
) {
1404 scic_sds_controller_release_frame(
1405 request
->owning_controller
, request
->saved_rx_frame_index
);
1408 sci_base_state_machine_change_state(&request
->state_machine
,
1409 SCI_BASE_REQUEST_STATE_FINAL
);
1414 * *****************************************************************************
1415 * * ABORTING STATE HANDLERS
1416 * ***************************************************************************** */
1419 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1420 * object receives a scic_sds_request_terminate() request. This method is the
1421 * io request aborting state abort handlers. On receipt of a multiple
1422 * terminate requests the io request will transition to the completed state.
1423 * This should not happen in normal operation. enum sci_status SCI_SUCCESS
1425 static enum sci_status
scic_sds_request_aborting_state_abort_handler(
1426 struct scic_sds_request
*request
)
1428 sci_base_state_machine_change_state(&request
->state_machine
,
1429 SCI_BASE_REQUEST_STATE_COMPLETED
);
1434 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1435 * object receives a scic_sds_request_task_completion() request. This method
1436 * decodes the completion type waiting for the abort task complete
1437 * notification. When the abort task complete is received the io request
1438 * transitions to the completed state. enum sci_status SCI_SUCCESS
1440 static enum sci_status
scic_sds_request_aborting_state_tc_completion_handler(
1441 struct scic_sds_request
*sci_req
,
1442 u32 completion_code
)
1444 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code
)) {
1445 case (SCU_TASK_DONE_GOOD
<< SCU_COMPLETION_TL_STATUS_SHIFT
):
1446 case (SCU_TASK_DONE_TASK_ABORT
<< SCU_COMPLETION_TL_STATUS_SHIFT
):
1447 scic_sds_request_set_status(
1448 sci_req
, SCU_TASK_DONE_TASK_ABORT
, SCI_FAILURE_IO_TERMINATED
1451 sci_base_state_machine_change_state(&sci_req
->state_machine
,
1452 SCI_BASE_REQUEST_STATE_COMPLETED
);
1457 * Unless we get some strange error wait for the task abort to complete
1458 * TODO: Should there be a state change for this completion? */
1466 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
1467 * object receives a scic_sds_request_frame_handler() request. This method
1468 * discards the unsolicited frame since we are waiting for the abort task
1469 * completion. enum sci_status SCI_SUCCESS
1471 static enum sci_status
scic_sds_request_aborting_state_frame_handler(
1472 struct scic_sds_request
*sci_req
,
1475 /* TODO: Is it even possible to get an unsolicited frame in the aborting state? */
1477 scic_sds_controller_release_frame(
1478 sci_req
->owning_controller
, frame_index
);
1483 static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table
[] = {
1484 [SCI_BASE_REQUEST_STATE_INITIAL
] = {
1486 [SCI_BASE_REQUEST_STATE_CONSTRUCTED
] = {
1487 .start_handler
= scic_sds_request_constructed_state_start_handler
,
1488 .abort_handler
= scic_sds_request_constructed_state_abort_handler
,
1490 [SCI_BASE_REQUEST_STATE_STARTED
] = {
1491 .abort_handler
= scic_sds_request_started_state_abort_handler
,
1492 .tc_completion_handler
= scic_sds_request_started_state_tc_completion_handler
,
1493 .frame_handler
= scic_sds_request_started_state_frame_handler
,
1495 [SCI_BASE_REQUEST_STATE_COMPLETED
] = {
1496 .complete_handler
= scic_sds_request_completed_state_complete_handler
,
1498 [SCI_BASE_REQUEST_STATE_ABORTING
] = {
1499 .abort_handler
= scic_sds_request_aborting_state_abort_handler
,
1500 .tc_completion_handler
= scic_sds_request_aborting_state_tc_completion_handler
,
1501 .frame_handler
= scic_sds_request_aborting_state_frame_handler
,
1503 [SCI_BASE_REQUEST_STATE_FINAL
] = {
1508 * scic_sds_request_initial_state_enter() -
1509 * @object: This parameter specifies the base object for which the state
1510 * transition is occurring.
1512 * This method implements the actions taken when entering the
1513 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
1514 * base request is constructed. Entry into the initial state sets all handlers
1515 * for the io request object to their default handlers. none
1517 static void scic_sds_request_initial_state_enter(void *object
)
1519 struct scic_sds_request
*sci_req
= object
;
1523 scic_sds_request_state_handler_table
,
1524 SCI_BASE_REQUEST_STATE_INITIAL
1529 * scic_sds_request_constructed_state_enter() -
1530 * @object: The io request object that is to enter the constructed state.
1532 * This method implements the actions taken when entering the
1533 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
1534 * for the the constructed state. none
1536 static void scic_sds_request_constructed_state_enter(void *object
)
1538 struct scic_sds_request
*sci_req
= object
;
1542 scic_sds_request_state_handler_table
,
1543 SCI_BASE_REQUEST_STATE_CONSTRUCTED
1548 * scic_sds_request_started_state_enter() -
1549 * @object: This parameter specifies the base object for which the state
1550 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
1552 * This method implements the actions taken when entering the
1553 * SCI_BASE_REQUEST_STATE_STARTED state. If the io request object type is a
1554 * SCSI Task request we must enter the started substate machine. none
1556 static void scic_sds_request_started_state_enter(void *object
)
1558 struct scic_sds_request
*sci_req
= object
;
1562 scic_sds_request_state_handler_table
,
1563 SCI_BASE_REQUEST_STATE_STARTED
1567 * Most of the request state machines have a started substate machine so
1568 * start its execution on the entry to the started state. */
1569 if (sci_req
->has_started_substate_machine
== true)
1570 sci_base_state_machine_start(&sci_req
->started_substate_machine
);
1574 * scic_sds_request_started_state_exit() -
1575 * @object: This parameter specifies the base object for which the state
1576 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
1579 * This method implements the actions taken when exiting the
1580 * SCI_BASE_REQUEST_STATE_STARTED state. For task requests the action will be
1581 * to stop the started substate machine. none
1583 static void scic_sds_request_started_state_exit(void *object
)
1585 struct scic_sds_request
*sci_req
= object
;
1587 if (sci_req
->has_started_substate_machine
== true)
1588 sci_base_state_machine_stop(&sci_req
->started_substate_machine
);
1592 * scic_sds_request_completed_state_enter() -
1593 * @object: This parameter specifies the base object for which the state
1594 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
1597 * This method implements the actions taken when entering the
1598 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
1599 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
1600 * completion status and convert it to an enum sci_status to return in the
1601 * completion callback function. none
1603 static void scic_sds_request_completed_state_enter(void *object
)
1605 struct scic_sds_request
*sci_req
= object
;
1606 struct scic_sds_controller
*scic
=
1607 scic_sds_request_get_controller(sci_req
);
1608 struct isci_host
*ihost
= scic
->ihost
;
1609 struct isci_request
*ireq
= sci_req
->ireq
;
1611 SET_STATE_HANDLER(sci_req
,
1612 scic_sds_request_state_handler_table
,
1613 SCI_BASE_REQUEST_STATE_COMPLETED
);
1615 /* Tell the SCI_USER that the IO request is complete */
1616 if (sci_req
->is_task_management_request
== false)
1617 isci_request_io_request_complete(ihost
,
1619 sci_req
->sci_status
);
1621 isci_task_request_complete(ihost
, ireq
, sci_req
->sci_status
);
1625 * scic_sds_request_aborting_state_enter() -
1626 * @object: This parameter specifies the base object for which the state
1627 * transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
1630 * This method implements the actions taken when entering the
1631 * SCI_BASE_REQUEST_STATE_ABORTING state. none
1633 static void scic_sds_request_aborting_state_enter(void *object
)
1635 struct scic_sds_request
*sci_req
= object
;
1637 /* Setting the abort bit in the Task Context is required by the silicon. */
1638 sci_req
->task_context_buffer
->abort
= 1;
1642 scic_sds_request_state_handler_table
,
1643 SCI_BASE_REQUEST_STATE_ABORTING
1648 * scic_sds_request_final_state_enter() -
1649 * @object: This parameter specifies the base object for which the state
1650 * transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
1652 * This method implements the actions taken when entering the
1653 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
1654 * state handlers in place. none
1656 static void scic_sds_request_final_state_enter(void *object
)
1658 struct scic_sds_request
*sci_req
= object
;
1662 scic_sds_request_state_handler_table
,
1663 SCI_BASE_REQUEST_STATE_FINAL
1667 static const struct sci_base_state scic_sds_request_state_table
[] = {
1668 [SCI_BASE_REQUEST_STATE_INITIAL
] = {
1669 .enter_state
= scic_sds_request_initial_state_enter
,
1671 [SCI_BASE_REQUEST_STATE_CONSTRUCTED
] = {
1672 .enter_state
= scic_sds_request_constructed_state_enter
,
1674 [SCI_BASE_REQUEST_STATE_STARTED
] = {
1675 .enter_state
= scic_sds_request_started_state_enter
,
1676 .exit_state
= scic_sds_request_started_state_exit
1678 [SCI_BASE_REQUEST_STATE_COMPLETED
] = {
1679 .enter_state
= scic_sds_request_completed_state_enter
,
1681 [SCI_BASE_REQUEST_STATE_ABORTING
] = {
1682 .enter_state
= scic_sds_request_aborting_state_enter
,
1684 [SCI_BASE_REQUEST_STATE_FINAL
] = {
1685 .enter_state
= scic_sds_request_final_state_enter
,
1689 static void scic_sds_general_request_construct(struct scic_sds_controller
*scic
,
1690 struct scic_sds_remote_device
*sci_dev
,
1692 void *user_io_request_object
,
1693 struct scic_sds_request
*sci_req
)
1695 sci_base_state_machine_construct(&sci_req
->state_machine
, sci_req
,
1696 scic_sds_request_state_table
, SCI_BASE_REQUEST_STATE_INITIAL
);
1697 sci_base_state_machine_start(&sci_req
->state_machine
);
1699 sci_req
->io_tag
= io_tag
;
1700 sci_req
->user_request
= user_io_request_object
;
1701 sci_req
->owning_controller
= scic
;
1702 sci_req
->target_device
= sci_dev
;
1703 sci_req
->has_started_substate_machine
= false;
1704 sci_req
->protocol
= SCIC_NO_PROTOCOL
;
1705 sci_req
->saved_rx_frame_index
= SCU_INVALID_FRAME_INDEX
;
1706 sci_req
->device_sequence
= scic_sds_remote_device_get_sequence(sci_dev
);
1708 sci_req
->sci_status
= SCI_SUCCESS
;
1709 sci_req
->scu_status
= 0;
1710 sci_req
->post_context
= 0xFFFFFFFF;
1712 sci_req
->is_task_management_request
= false;
1714 if (io_tag
== SCI_CONTROLLER_INVALID_IO_TAG
) {
1715 sci_req
->was_tag_assigned_by_user
= false;
1716 sci_req
->task_context_buffer
= NULL
;
1718 sci_req
->was_tag_assigned_by_user
= true;
1720 sci_req
->task_context_buffer
=
1721 scic_sds_controller_get_task_context_buffer(scic
, io_tag
);
1725 enum sci_status
scic_io_request_construct(struct scic_sds_controller
*scic
,
1726 struct scic_sds_remote_device
*sci_dev
,
1728 void *user_io_request_object
,
1729 struct scic_sds_request
*sci_req
,
1730 struct scic_sds_request
**new_scic_io_request_handle
)
1732 struct domain_device
*dev
= sci_dev_to_domain(sci_dev
);
1733 enum sci_status status
= SCI_SUCCESS
;
1735 /* Build the common part of the request */
1736 scic_sds_general_request_construct(scic
, sci_dev
, io_tag
,
1737 user_io_request_object
, sci_req
);
1739 if (sci_dev
->rnc
.remote_node_index
== SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX
)
1740 return SCI_FAILURE_INVALID_REMOTE_DEVICE
;
1742 if (dev
->dev_type
== SAS_END_DEV
) {
1743 scic_sds_ssp_io_request_assign_buffers(sci_req
);
1744 } else if (dev
->dev_type
== SATA_DEV
|| (dev
->tproto
& SAS_PROTOCOL_STP
)) {
1745 scic_sds_stp_request_assign_buffers(sci_req
);
1746 memset(sci_req
->command_buffer
, 0, sizeof(struct sata_fis_reg_h2d
));
1747 } else if (dev_is_expander(dev
)) {
1748 scic_sds_smp_request_assign_buffers(sci_req
);
1749 memset(sci_req
->command_buffer
, 0, sizeof(struct smp_request
));
1751 status
= SCI_FAILURE_UNSUPPORTED_PROTOCOL
;
1753 if (status
== SCI_SUCCESS
) {
1754 memset(sci_req
->task_context_buffer
, 0,
1755 SCI_FIELD_OFFSET(struct scu_task_context
, sgl_pair_ab
));
1756 *new_scic_io_request_handle
= sci_req
;
1762 enum sci_status
scic_task_request_construct(struct scic_sds_controller
*scic
,
1763 struct scic_sds_remote_device
*sci_dev
,
1765 void *user_io_request_object
,
1766 struct scic_sds_request
*sci_req
,
1767 struct scic_sds_request
**new_sci_req
)
1769 struct domain_device
*dev
= sci_dev_to_domain(sci_dev
);
1770 enum sci_status status
= SCI_SUCCESS
;
1772 /* Build the common part of the request */
1773 scic_sds_general_request_construct(scic
, sci_dev
, io_tag
,
1774 user_io_request_object
,
1777 if (dev
->dev_type
== SAS_END_DEV
) {
1778 scic_sds_ssp_task_request_assign_buffers(sci_req
);
1780 sci_req
->has_started_substate_machine
= true;
1782 /* Construct the started sub-state machine. */
1783 sci_base_state_machine_construct(
1784 &sci_req
->started_substate_machine
,
1786 scic_sds_io_request_started_task_mgmt_substate_table
,
1787 SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
1789 } else if (dev
->dev_type
== SATA_DEV
|| (dev
->tproto
& SAS_PROTOCOL_STP
))
1790 scic_sds_stp_request_assign_buffers(sci_req
);
1792 status
= SCI_FAILURE_UNSUPPORTED_PROTOCOL
;
1794 if (status
== SCI_SUCCESS
) {
1795 sci_req
->is_task_management_request
= true;
1796 memset(sci_req
->task_context_buffer
, 0, sizeof(struct scu_task_context
));
1797 *new_sci_req
= sci_req
;