isci: merge smp request substates into primary state machine
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / scsi / isci / stp_request.c
blobe94ece81ed9d7759baf7a679a649945ca652af87
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
5 * GPL LICENSE SUMMARY
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * BSD LICENSE
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #include <scsi/sas.h>
57 #include "sas.h"
58 #include "state_machine.h"
59 #include "remote_device.h"
60 #include "stp_request.h"
61 #include "unsolicited_frame_control.h"
62 #include "scu_completion_codes.h"
63 #include "scu_event_codes.h"
64 #include "scu_task_context.h"
65 #include "request.h"
67 /**
68 * This method is will fill in the SCU Task Context for any type of SATA
69 * request. This is called from the various SATA constructors.
70 * @sci_req: The general IO request object which is to be used in
71 * constructing the SCU task context.
72 * @task_context: The buffer pointer for the SCU task context which is being
73 * constructed.
75 * The general io request construction is complete. The buffer assignment for
76 * the command buffer is complete. none Revisit task context construction to
77 * determine what is common for SSP/SMP/STP task context structures.
79 static void scu_sata_reqeust_construct_task_context(
80 struct scic_sds_request *sci_req,
81 struct scu_task_context *task_context)
83 dma_addr_t dma_addr;
84 struct scic_sds_controller *controller;
85 struct scic_sds_remote_device *target_device;
86 struct scic_sds_port *target_port;
88 controller = scic_sds_request_get_controller(sci_req);
89 target_device = scic_sds_request_get_device(sci_req);
90 target_port = scic_sds_request_get_port(sci_req);
92 /* Fill in the TC with the its required data */
93 task_context->abort = 0;
94 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
95 task_context->initiator_request = 1;
96 task_context->connection_rate = target_device->connection_rate;
97 task_context->protocol_engine_index =
98 scic_sds_controller_get_protocol_engine_group(controller);
99 task_context->logical_port_index =
100 scic_sds_port_get_index(target_port);
101 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
102 task_context->valid = SCU_TASK_CONTEXT_VALID;
103 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
105 task_context->remote_node_index =
106 scic_sds_remote_device_get_index(sci_req->target_device);
107 task_context->command_code = 0;
109 task_context->link_layer_control = 0;
110 task_context->do_not_dma_ssp_good_response = 1;
111 task_context->strict_ordering = 0;
112 task_context->control_frame = 0;
113 task_context->timeout_enable = 0;
114 task_context->block_guard_enable = 0;
116 task_context->address_modifier = 0;
117 task_context->task_phase = 0x01;
119 task_context->ssp_command_iu_length =
120 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
122 /* Set the first word of the H2D REG FIS */
123 task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;
125 if (sci_req->was_tag_assigned_by_user) {
127 * Build the task context now since we have already read
128 * the data
130 sci_req->post_context =
131 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
132 (scic_sds_controller_get_protocol_engine_group(
133 controller) <<
134 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
135 (scic_sds_port_get_index(target_port) <<
136 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
137 scic_sds_io_tag_get_index(sci_req->io_tag));
138 } else {
140 * Build the task context now since we have already read
141 * the data.
142 * I/O tag index is not assigned because we have to wait
143 * until we get a TCi.
145 sci_req->post_context =
146 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
147 (scic_sds_controller_get_protocol_engine_group(
148 controller) <<
149 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
150 (scic_sds_port_get_index(target_port) <<
151 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
155 * Copy the physical address for the command buffer to the SCU Task
156 * Context. We must offset the command buffer by 4 bytes because the
157 * first 4 bytes are transfered in the body of the TC.
159 dma_addr = scic_io_request_get_dma_addr(sci_req,
160 ((char *) &sci_req->stp.cmd) +
161 sizeof(u32));
163 task_context->command_iu_upper = upper_32_bits(dma_addr);
164 task_context->command_iu_lower = lower_32_bits(dma_addr);
166 /* SATA Requests do not have a response buffer */
167 task_context->response_iu_upper = 0;
168 task_context->response_iu_lower = 0;
173 * @sci_req:
175 * This method will perform any general sata request construction. What part of
176 * SATA IO request construction is general? none
178 static void scic_sds_stp_non_ncq_request_construct(
179 struct scic_sds_request *sci_req)
181 sci_req->has_started_substate_machine = true;
186 * @sci_req: This parameter specifies the request to be constructed as an
187 * optimized request.
188 * @optimized_task_type: This parameter specifies whether the request is to be
189 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
190 * value of 1 indicates NCQ.
192 * This method will perform request construction common to all types of STP
193 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
194 * returns an indication as to whether the construction was successful.
196 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
197 u8 optimized_task_type,
198 u32 len,
199 enum dma_data_direction dir)
201 struct scu_task_context *task_context = sci_req->task_context_buffer;
203 /* Build the STP task context structure */
204 scu_sata_reqeust_construct_task_context(sci_req, task_context);
206 /* Copy over the SGL elements */
207 scic_sds_request_build_sgl(sci_req);
209 /* Copy over the number of bytes to be transfered */
210 task_context->transfer_length_bytes = len;
212 if (dir == DMA_TO_DEVICE) {
214 * The difference between the DMA IN and DMA OUT request task type
215 * values are consistent with the difference between FPDMA READ
216 * and FPDMA WRITE values. Add the supplied task type parameter
217 * to this difference to set the task type properly for this
218 * DATA OUT (WRITE) case. */
219 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
220 - SCU_TASK_TYPE_DMA_IN);
221 } else {
223 * For the DATA IN (READ) case, simply save the supplied
224 * optimized task type. */
225 task_context->task_type = optimized_task_type;
231 * @sci_req: This parameter specifies the request to be constructed.
233 * This method will construct the STP UDMA request and its associated TC data.
234 * This method returns an indication as to whether the construction was
235 * successful. SCI_SUCCESS Currently this method always returns this value.
237 enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
238 u32 len,
239 enum dma_data_direction dir)
241 scic_sds_stp_optimized_request_construct(sci_req,
242 SCU_TASK_TYPE_FPDMAQ_READ,
243 len, dir);
244 return SCI_SUCCESS;
248 * scu_stp_raw_request_construct_task_context -
249 * @sci_req: This parameter specifies the STP request object for which to
250 * construct a RAW command frame task context.
251 * @task_context: This parameter specifies the SCU specific task context buffer
252 * to construct.
254 * This method performs the operations common to all SATA/STP requests
255 * utilizing the raw frame method. none
257 static void scu_stp_raw_request_construct_task_context(
258 struct scic_sds_stp_request *stp_req,
259 struct scu_task_context *task_context)
261 struct scic_sds_request *sci_req = to_sci_req(stp_req);
263 scu_sata_reqeust_construct_task_context(sci_req, task_context);
265 task_context->control_frame = 0;
266 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
267 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
268 task_context->type.stp.fis_type = FIS_REGH2D;
269 task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
272 void scic_stp_io_request_set_ncq_tag(
273 struct scic_sds_request *req,
274 u16 ncq_tag)
277 * @note This could be made to return an error to the user if the user
278 * attempts to set the NCQ tag in the wrong state.
280 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
285 * @sci_req:
287 * Get the next SGL element from the request. - Check on which SGL element pair
288 * we are working - if working on SLG pair element A - advance to element B -
289 * else - check to see if there are more SGL element pairs for this IO request
290 * - if there are more SGL element pairs - advance to the next pair and return
291 * element A struct scu_sgl_element*
293 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
295 struct scu_sgl_element *current_sgl;
296 struct scic_sds_request *sci_req = to_sci_req(stp_req);
297 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
299 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
300 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
301 pio_sgl->sgl_pair->B.address_upper == 0) {
302 current_sgl = NULL;
303 } else {
304 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
305 current_sgl = &pio_sgl->sgl_pair->B;
307 } else {
308 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
309 pio_sgl->sgl_pair->next_pair_upper == 0) {
310 current_sgl = NULL;
311 } else {
312 u64 phys_addr;
314 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
315 phys_addr <<= 32;
316 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
318 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
319 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
320 current_sgl = &pio_sgl->sgl_pair->A;
324 return current_sgl;
329 * @sci_req:
330 * @completion_code:
332 * This method processes a TC completion. The expected TC completion is for
333 * the transmission of the H2D register FIS containing the SATA/STP non-data
334 * request. This method always successfully processes the TC completion.
335 * SCI_SUCCESS This value is always returned.
337 static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
338 struct scic_sds_request *sci_req,
339 u32 completion_code)
341 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
342 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
343 scic_sds_request_set_status(
344 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
347 sci_base_state_machine_change_state(
348 &sci_req->started_substate_machine,
349 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
351 break;
353 default:
355 * All other completion status cause the IO to be complete. If a NAK
356 * was received, then it is up to the user to retry the request. */
357 scic_sds_request_set_status(
358 sci_req,
359 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
360 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
363 sci_base_state_machine_change_state(
364 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
365 break;
368 return SCI_SUCCESS;
373 * @request: This parameter specifies the request for which a frame has been
374 * received.
375 * @frame_index: This parameter specifies the index of the frame that has been
376 * received.
378 * This method processes frames received from the target while waiting for a
379 * device to host register FIS. If a non-register FIS is received during this
380 * time, it is treated as a protocol violation from an IO perspective. Indicate
381 * if the received frame was processed successfully.
383 static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
384 struct scic_sds_request *sci_req,
385 u32 frame_index)
387 enum sci_status status;
388 struct dev_to_host_fis *frame_header;
389 u32 *frame_buffer;
390 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
391 struct scic_sds_controller *scic = sci_req->owning_controller;
/* Map the unsolicited frame index to its header (first FIS dword). */
393 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
394 frame_index,
395 (void **)&frame_header);
397 if (status != SCI_SUCCESS) {
398 dev_err(scic_to_dev(sci_req->owning_controller),
399 "%s: SCIC IO Request 0x%p could not get frame header "
400 "for frame index %d, status %x\n",
401 __func__, stp_req, frame_index, status);
/* Frame is not released here: nothing was decoded. */
403 return status;
406 switch (frame_header->fis_type) {
407 case FIS_REGD2H:
/* Expected response: capture the payload into the response FIS. */
408 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
409 frame_index,
410 (void **)&frame_buffer);
412 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
413 frame_header,
414 frame_buffer);
416 /* The command has completed with error */
417 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
418 SCI_FAILURE_IO_RESPONSE_VALID);
419 break;
421 default:
/* Any other FIS while awaiting a D2H register FIS is a protocol
 * violation from an IO perspective. */
422 dev_warn(scic_to_dev(scic),
423 "%s: IO Request:0x%p Frame Id:%d protocol "
424 "violation occurred\n", __func__, stp_req,
425 frame_index);
427 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
428 SCI_FAILURE_PROTOCOL_VIOLATION);
429 break;
/* In every decoded case the IO is finished. */
432 sci_base_state_machine_change_state(&sci_req->state_machine,
433 SCI_BASE_REQUEST_STATE_COMPLETED);
435 /* Frame has been decoded return it to the controller */
436 scic_sds_controller_release_frame(scic, frame_index);
438 return status;
441 /* --------------------------------------------------------------------------- */
/* Per-substate handlers for a started non-data STP request. */
443 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
/* Waiting for the H2D register FIS transmission to complete. */
444 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
445 .abort_handler = scic_sds_request_started_state_abort_handler,
446 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
/* Waiting for the device's D2H register FIS response. */
448 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
449 .abort_handler = scic_sds_request_started_state_abort_handler,
450 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
454 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
455 void *object)
457 struct scic_sds_request *sci_req = object;
459 SET_STATE_HANDLER(
460 sci_req,
461 scic_sds_stp_request_started_non_data_substate_handler_table,
462 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
465 scic_sds_remote_device_set_working_request(
466 sci_req->target_device, sci_req
470 static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
472 struct scic_sds_request *sci_req = object;
474 SET_STATE_HANDLER(
475 sci_req,
476 scic_sds_stp_request_started_non_data_substate_handler_table,
477 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
481 /* --------------------------------------------------------------------------- */
/* Substate machine definition for a started non-data STP request. */
483 static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
/* Entry: install handlers and set the device's working request. */
484 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
485 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
/* Entry: install handlers for the D2H response wait. */
487 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
488 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
492 enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
494 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
496 scic_sds_stp_non_ncq_request_construct(sci_req);
498 /* Build the STP task context structure */
499 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
501 sci_base_state_machine_construct(&sci_req->started_substate_machine,
502 sci_req,
503 scic_sds_stp_request_started_non_data_substate_table,
504 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
506 return SCI_SUCCESS;
509 #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
511 /* transmit DATA_FIS from (current sgl + offset) for input
512 * parameter length. current sgl and offset is alreay stored in the IO request
514 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
515 struct scic_sds_request *sci_req,
516 u32 length)
518 struct scic_sds_controller *scic = sci_req->owning_controller;
519 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
520 struct scu_task_context *task_context;
521 struct scu_sgl_element *current_sgl;
523 /* Recycle the TC and reconstruct it for sending out DATA FIS containing
524 * for the data from current_sgl+offset for the input length
526 task_context = scic_sds_controller_get_task_context_buffer(scic,
527 sci_req->io_tag);
529 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
530 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
531 else
532 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
534 /* update the TC */
535 task_context->command_iu_upper = current_sgl->address_upper;
536 task_context->command_iu_lower = current_sgl->address_lower;
537 task_context->transfer_length_bytes = length;
538 task_context->type.stp.fis_type = FIS_DATA;
540 /* send the new TC out. */
541 return scic_controller_continue_io(sci_req);
544 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
547 struct scu_sgl_element *current_sgl;
548 u32 sgl_offset;
549 u32 remaining_bytes_in_current_sgl = 0;
550 enum sci_status status = SCI_SUCCESS;
551 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
553 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
555 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
556 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
557 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
558 } else {
559 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
560 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
564 if (stp_req->type.pio.pio_transfer_bytes > 0) {
565 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
566 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
567 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
568 if (status == SCI_SUCCESS) {
569 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
571 /* update the current sgl, sgl_offset and save for future */
572 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
573 sgl_offset = 0;
575 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
576 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
577 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
579 if (status == SCI_SUCCESS) {
580 /* Sgl offset will be adjusted and saved for future */
581 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
582 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
583 stp_req->type.pio.pio_transfer_bytes = 0;
588 if (status == SCI_SUCCESS) {
589 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
592 return status;
597 * @stp_request: The request that is used for the SGL processing.
598 * @data_buffer: The buffer of data to be copied.
599 * @length: The length of the data transfer.
601 * Copy the data from the buffer for the length specified to the IO reqeust SGL
602 * specified data region. enum sci_status
604 static enum sci_status
605 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
606 u8 *data_buf, u32 len)
608 struct scic_sds_request *sci_req;
609 struct isci_request *ireq;
610 u8 *src_addr;
611 int copy_len;
612 struct sas_task *task;
613 struct scatterlist *sg;
614 void *kaddr;
615 int total_len = len;
617 sci_req = to_sci_req(stp_req);
618 ireq = sci_req_to_ireq(sci_req);
619 task = isci_request_access_task(ireq);
620 src_addr = data_buf;
/* Scattered destination: walk the sg list, copying at most one element's
 * worth per iteration. */
622 if (task->num_scatter > 0) {
623 sg = task->scatter;
625 while (total_len > 0) {
626 struct page *page = sg_page(sg);
/* NOTE(review): copy_len is bounded by sg_dma_len() while the copy
 * starts at sg->offset within the page -- assumes each element fits
 * within its page from that offset; confirm against how the sg list
 * is built by the submitter. */
628 copy_len = min_t(int, total_len, sg_dma_len(sg));
629 kaddr = kmap_atomic(page, KM_IRQ0);
630 memcpy(kaddr + sg->offset, src_addr, copy_len);
631 kunmap_atomic(kaddr, KM_IRQ0);
632 total_len -= copy_len;
633 src_addr += copy_len;
634 sg = sg_next(sg);
/* Flat destination: task->scatter is used directly as the buffer. */
636 } else {
637 BUG_ON(task->total_xfer_len < total_len);
638 memcpy(task->scatter, src_addr, total_len);
/* Always reports success; a bad destination would fault, not return. */
641 return SCI_SUCCESS;
646 * @sci_req: The PIO DATA IN request that is to receive the data.
647 * @data_buffer: The buffer to copy from.
649 * Copy the data buffer to the io request data region. enum sci_status
651 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
652 struct scic_sds_stp_request *sci_req,
653 u8 *data_buffer)
655 enum sci_status status;
658 * If there is less than 1K remaining in the transfer request
659 * copy just the data for the transfer */
660 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
661 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
662 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
664 if (status == SCI_SUCCESS)
665 sci_req->type.pio.pio_transfer_bytes = 0;
666 } else {
667 /* We are transfering the whole frame so copy */
668 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
669 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
671 if (status == SCI_SUCCESS)
672 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
675 return status;
680 * @sci_req:
681 * @completion_code:
683 * enum sci_status
685 static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
686 struct scic_sds_request *sci_req,
687 u32 completion_code)
689 enum sci_status status = SCI_SUCCESS;
691 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
692 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
693 scic_sds_request_set_status(
694 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
697 sci_base_state_machine_change_state(
698 &sci_req->started_substate_machine,
699 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
701 break;
703 default:
705 * All other completion status cause the IO to be complete. If a NAK
706 * was received, then it is up to the user to retry the request. */
707 scic_sds_request_set_status(
708 sci_req,
709 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
710 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
713 sci_base_state_machine_change_state(
714 &sci_req->state_machine,
715 SCI_BASE_REQUEST_STATE_COMPLETED
717 break;
720 return status;
723 static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(struct scic_sds_request *sci_req,
724 u32 frame_index)
726 struct scic_sds_controller *scic = sci_req->owning_controller;
727 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
728 struct isci_request *ireq = sci_req_to_ireq(sci_req);
729 struct sas_task *task = isci_request_access_task(ireq);
730 struct dev_to_host_fis *frame_header;
731 enum sci_status status;
732 u32 *frame_buffer;
734 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
735 frame_index,
736 (void **)&frame_header);
738 if (status != SCI_SUCCESS) {
739 dev_err(scic_to_dev(scic),
740 "%s: SCIC IO Request 0x%p could not get frame header "
741 "for frame index %d, status %x\n",
742 __func__, stp_req, frame_index, status);
743 return status;
746 switch (frame_header->fis_type) {
747 case FIS_PIO_SETUP:
748 /* Get from the frame buffer the PIO Setup Data */
749 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
750 frame_index,
751 (void **)&frame_buffer);
753 /* Get the data from the PIO Setup The SCU Hardware returns
754 * first word in the frame_header and the rest of the data is in
755 * the frame buffer so we need to back up one dword
758 /* transfer_count: first 16bits in the 4th dword */
759 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
761 /* ending_status: 4th byte in the 3rd dword */
762 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
764 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
765 frame_header,
766 frame_buffer);
768 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
770 /* The next state is dependent on whether the
771 * request was PIO Data-in or Data out
773 if (task->data_dir == DMA_FROM_DEVICE) {
774 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
775 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
776 } else if (task->data_dir == DMA_TO_DEVICE) {
777 /* Transmit data */
778 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
779 if (status != SCI_SUCCESS)
780 break;
781 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
782 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
784 break;
785 case FIS_SETDEVBITS:
786 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
787 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
788 break;
789 case FIS_REGD2H:
790 if (frame_header->status & ATA_BUSY) {
791 /* Now why is the drive sending a D2H Register FIS when
792 * it is still busy? Do nothing since we are still in
793 * the right state.
795 dev_dbg(scic_to_dev(scic),
796 "%s: SCIC PIO Request 0x%p received "
797 "D2H Register FIS with BSY status "
798 "0x%x\n", __func__, stp_req,
799 frame_header->status);
800 break;
803 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
804 frame_index,
805 (void **)&frame_buffer);
807 scic_sds_controller_copy_sata_response(&sci_req->stp.req,
808 frame_header,
809 frame_buffer);
811 scic_sds_request_set_status(sci_req,
812 SCU_TASK_DONE_CHECK_RESPONSE,
813 SCI_FAILURE_IO_RESPONSE_VALID);
815 sci_base_state_machine_change_state(&sci_req->state_machine,
816 SCI_BASE_REQUEST_STATE_COMPLETED);
817 break;
818 default:
819 /* FIXME: what do we do here? */
820 break;
823 /* Frame is decoded return it to the controller */
824 scic_sds_controller_release_frame(scic, frame_index);
826 return status;
/* Frame handler for the PIO data-in AWAIT_DATA substate: copy each DATA
 * FIS into the request's SGL region; once the transfer count is drained,
 * the PIO SETUP ending status decides whether the command completes or
 * another frame sequence follows. */
829 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(struct scic_sds_request *sci_req,
830 u32 frame_index)
832 enum sci_status status;
833 struct dev_to_host_fis *frame_header;
834 struct sata_fis_data *frame_buffer;
835 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
836 struct scic_sds_controller *scic = sci_req->owning_controller;
838 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
839 frame_index,
840 (void **)&frame_header);
842 if (status != SCI_SUCCESS) {
843 dev_err(scic_to_dev(scic),
844 "%s: SCIC IO Request 0x%p could not get frame header "
845 "for frame index %d, status %x\n",
846 __func__, stp_req, frame_index, status);
847 return status;
850 if (frame_header->fis_type == FIS_DATA) {
851 if (stp_req->type.pio.request_current.sgl_pair == NULL) {
/* No SGL cursor: park the frame (deliberately NOT released here) and
 * mark the transfer drained; it is consumed later via
 * saved_rx_frame_index. */
852 sci_req->saved_rx_frame_index = frame_index;
853 stp_req->type.pio.pio_transfer_bytes = 0;
854 } else {
855 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
856 frame_index,
857 (void **)&frame_buffer);
/* Copy this frame's payload into the request's data region. */
859 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
860 (u8 *)frame_buffer);
862 /* Frame is decoded return it to the controller */
863 scic_sds_controller_release_frame(scic, frame_index);
866 /* Check for the end of the transfer, are there more
867 * bytes remaining for this data transfer
869 if (status != SCI_SUCCESS ||
870 stp_req->type.pio.pio_transfer_bytes != 0)
871 return status;
/* Transfer drained: ending status (from the PIO SETUP FIS) decides
 * completion vs. waiting for the next frame sequence. */
873 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
874 scic_sds_request_set_status(sci_req,
875 SCU_TASK_DONE_CHECK_RESPONSE,
876 SCI_FAILURE_IO_RESPONSE_VALID);
878 sci_base_state_machine_change_state(&sci_req->state_machine,
879 SCI_BASE_REQUEST_STATE_COMPLETED);
880 } else {
881 sci_base_state_machine_change_state(&sci_req->started_substate_machine,
882 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
/* Anything but a DATA FIS while expecting data is fatal. */
884 } else {
885 dev_err(scic_to_dev(scic),
886 "%s: SCIC PIO Request 0x%p received frame %d "
887 "with fis type 0x%02x when expecting a data "
888 "fis.\n", __func__, stp_req, frame_index,
889 frame_header->fis_type);
891 scic_sds_request_set_status(sci_req,
892 SCU_TASK_DONE_GOOD,
893 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
895 sci_base_state_machine_change_state(&sci_req->state_machine,
896 SCI_BASE_REQUEST_STATE_COMPLETED);
898 /* Frame is decoded return it to the controller */
899 scic_sds_controller_release_frame(scic, frame_index);
902 return status;
/* TC completion handler for the PIO data-out TRANSMIT_DATA substate: on a
 * good completion either send the next chunk or, once all frames are out,
 * go back to waiting for the next PIO SETUP / D2H register FIS. */
908 * @sci_req:
909 * @completion_code:
911 * enum sci_status
913 static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(
915 struct scic_sds_request *sci_req,
916 u32 completion_code)
918 enum sci_status status = SCI_SUCCESS;
919 bool all_frames_transferred = false;
920 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
922 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
923 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
924 /* Transmit data */
925 if (stp_req->type.pio.pio_transfer_bytes != 0) {
926 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
927 if (status == SCI_SUCCESS) {
928 if (stp_req->type.pio.pio_transfer_bytes == 0)
929 all_frames_transferred = true;
931 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
933 * this will happen if the all data is written at the
934 * first time after the pio setup fis is received
936 all_frames_transferred = true;
939 /* all data transferred. */
940 if (all_frames_transferred) {
942 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
943 * and wait for PIO_SETUP fis / or D2H REg fis. */
944 sci_base_state_machine_change_state(
945 &sci_req->started_substate_machine,
946 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
949 break;
951 default:
953 * All other completion status cause the IO to be complete. If a NAK
954 * was received, then it is up to the user to retry the request. */
955 scic_sds_request_set_status(
956 sci_req,
957 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
958 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
961 sci_base_state_machine_change_state(
962 &sci_req->state_machine,
963 SCI_BASE_REQUEST_STATE_COMPLETED
965 break;
968 return status;
973 * @request: This is the request which is receiving the event.
974 * @event_code: This is the event code that the request on which the request is
975 * expected to take action.
977 * This method will handle any link layer events while waiting for the data
978 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
980 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
981 struct scic_sds_request *request,
982 u32 event_code)
984 enum sci_status status;
986 switch (scu_get_event_specifier(event_code)) {
987 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
989 * We are waiting for data and the SCU has R_ERR the data frame.
990 * Go back to waiting for the D2H Register FIS */
991 sci_base_state_machine_change_state(
992 &request->started_substate_machine,
993 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
996 status = SCI_SUCCESS;
997 break;
999 default:
1000 dev_err(scic_to_dev(request->owning_controller),
1001 "%s: SCIC PIO Request 0x%p received unexpected "
1002 "event 0x%08x\n",
1003 __func__, request, event_code);
1005 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1006 status = SCI_FAILURE;
1007 break;
1010 return status;
1013 /* --------------------------------------------------------------------------- */
/*
 * Per-substate handler dispatch for the PIO started substate machine: each
 * entry selects the abort/TC-completion/frame/event callbacks that are
 * active while the request is in that substate.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
1035 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1036 void *object)
1038 struct scic_sds_request *sci_req = object;
1040 SET_STATE_HANDLER(
1041 sci_req,
1042 scic_sds_stp_request_started_pio_substate_handler_table,
1043 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1046 scic_sds_remote_device_set_working_request(
1047 sci_req->target_device, sci_req);
1050 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1052 struct scic_sds_request *sci_req = object;
1054 SET_STATE_HANDLER(
1055 sci_req,
1056 scic_sds_stp_request_started_pio_substate_handler_table,
1057 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1061 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1062 void *object)
1064 struct scic_sds_request *sci_req = object;
1066 SET_STATE_HANDLER(
1067 sci_req,
1068 scic_sds_stp_request_started_pio_substate_handler_table,
1069 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1073 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1074 void *object)
1076 struct scic_sds_request *sci_req = object;
1078 SET_STATE_HANDLER(
1079 sci_req,
1080 scic_sds_stp_request_started_pio_substate_handler_table,
1081 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
1085 /* --------------------------------------------------------------------------- */
/* Entry actions for each PIO started substate. */
static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	}
};
/**
 * scic_sds_stp_pio_request_construct() - construct an STP PIO request.
 * @sci_req: the request to construct.
 * @copy_rx_frame: true when received data should be copied into the SGL
 *	buffer location; false when the caller does not want the copy.
 *
 * Builds the raw STP task context, resets the PIO bookkeeping fields, and
 * constructs the PIO started substate machine starting in the
 * await-H2D-completion substate.
 *
 * Return: SCI_SUCCESS (construction cannot fail here).
 */
enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_pio_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
/**
 * scic_sds_stp_request_udma_complete_request() - record the final status
 *    pair and move the request to the completed state.
 * @request: the UDMA request to complete.
 * @scu_status: the SCU-level completion status to record.
 * @sci_status: the SCI-level completion status to record.
 */
static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
}
1150 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1151 u32 frame_index)
1153 struct scic_sds_controller *scic = sci_req->owning_controller;
1154 struct dev_to_host_fis *frame_header;
1155 enum sci_status status;
1156 u32 *frame_buffer;
1158 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1159 frame_index,
1160 (void **)&frame_header);
1162 if ((status == SCI_SUCCESS) &&
1163 (frame_header->fis_type == FIS_REGD2H)) {
1164 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1165 frame_index,
1166 (void **)&frame_buffer);
1168 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1169 frame_header,
1170 frame_buffer);
1173 scic_sds_controller_release_frame(scic, frame_index);
1175 return status;
/**
 * scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler() -
 *    TC completion handler for the UDMA await-TC-completion substate.
 * @sci_req: the UDMA request that received the task context completion.
 * @completion_code: the SCU hardware completion code.
 *
 * SCU_TASK_DONE_GOOD completes the IO successfully.  UNEXP_FIS/REG_ERR
 * complete the IO if a D2H register FIS was already received, otherwise the
 * request transitions to the await-D2H-register-FIS substate.  The listed
 * link-level errors suspend the remote device and fall through to the
 * default case, which completes the IO with a controller-specific error.
 *
 * Return: SCI_SUCCESS - the local status is never changed from its initial
 * value.
 */
static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_stp_request_udma_complete_request(sci_req,
							   SCU_TASK_DONE_GOOD,
							   SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
		/*
		 * We must check the response buffer to see if the D2H Register FIS was
		 * received before we got the TC completion. */
		if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
			scic_sds_remote_device_suspend(sci_req->target_device,
				SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));

			scic_sds_stp_request_udma_complete_request(sci_req,
								   SCU_TASK_DONE_CHECK_RESPONSE,
								   SCI_FAILURE_IO_RESPONSE_VALID);
		} else {
			/*
			 * If we have an error completion status for the TC then we can expect a
			 * D2H register FIS from the device so we must change state to wait for it */
			sci_base_state_machine_change_state(&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
		}
		break;

	/*
	 * / @todo Check to see if any of these completion status need to wait for
	 * / the device to host register fis. */
	/* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
		scic_sds_remote_device_suspend(sci_req->target_device,
			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
		/* Fall through to the default case */
	default:
		/* All other completion status cause the IO to be complete. */
		scic_sds_stp_request_udma_complete_request(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	return status;
}
1234 static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
1235 struct scic_sds_request *sci_req,
1236 u32 frame_index)
1238 enum sci_status status;
1240 /* Use the general frame handler to copy the resposne data */
1241 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1243 if (status != SCI_SUCCESS)
1244 return status;
1246 scic_sds_stp_request_udma_complete_request(sci_req,
1247 SCU_TASK_DONE_CHECK_RESPONSE,
1248 SCI_FAILURE_IO_RESPONSE_VALID);
1250 return status;
1253 /* --------------------------------------------------------------------------- */
/*
 * Per-substate handler dispatch for the UDMA started substate machine.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler = scic_sds_stp_request_udma_general_frame_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	}
};
1267 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1268 void *object)
1270 struct scic_sds_request *sci_req = object;
1272 SET_STATE_HANDLER(
1273 sci_req,
1274 scic_sds_stp_request_started_udma_substate_handler_table,
1275 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
/**
 * scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter()
 * @object: opaque pointer to the scic_sds_request entering the substate.
 *
 * This state is entered when there is a TC completion failure.  The hardware
 * received an unexpected condition while processing the IO request and now
 * will UF the D2H register FIS to complete the IO.
 */
static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_udma_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
		);
}
1298 /* --------------------------------------------------------------------------- */
/* Entry actions for each UDMA started substate. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	}
};
/**
 * scic_sds_stp_udma_request_construct() - construct an STP UDMA request.
 * @sci_req: the request to construct.
 * @len: the transfer length in bytes.
 * @dir: the DMA data direction of the transfer.
 *
 * NOTE(review): SCU_TASK_TYPE_DMA_IN is passed unconditionally here even for
 * outbound transfers - presumably scic_sds_stp_optimized_request_construct()
 * adjusts the task type based on @dir; confirm against that helper.
 *
 * Return: SCI_SUCCESS (construction cannot fail here).
 */
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 len,
						    enum dma_data_direction dir)
{
	scic_sds_stp_non_ncq_request_construct(sci_req);

	scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
						 len, dir);

	sci_base_state_machine_construct(
		&sci_req->started_substate_machine,
		sci_req,
		scic_sds_stp_request_started_udma_substate_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
		);

	return SCI_SUCCESS;
}
/**
 * scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler()
 * @sci_req: the soft-reset request that received the task context completion.
 * @completion_code: the SCU hardware completion code.
 *
 * This method processes a TC completion.  The expected TC completion is for
 * the transmission of the H2D register FIS containing the SATA/STP soft
 * reset request with SRST asserted.  On success the request advances to the
 * diagnostic (SRST de-assert) substate; any other status completes the IO.
 *
 * Return: SCI_SUCCESS - this value is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1374 * @sci_req:
1375 * @completion_code:
1377 * This method processes a TC completion. The expected TC completion is for
1378 * the transmission of the H2D register FIS containing the SATA/STP non-data
1379 * request. This method always successfully processes the TC completion.
1380 * SCI_SUCCESS This value is always returned.
1382 static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
1383 struct scic_sds_request *sci_req,
1384 u32 completion_code)
1386 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1387 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1388 scic_sds_request_set_status(
1389 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1392 sci_base_state_machine_change_state(
1393 &sci_req->started_substate_machine,
1394 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
1396 break;
1398 default:
1400 * All other completion status cause the IO to be complete. If a NAK
1401 * was received, then it is up to the user to retry the request. */
1402 scic_sds_request_set_status(
1403 sci_req,
1404 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1405 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1408 sci_base_state_machine_change_state(&sci_req->state_machine,
1409 SCI_BASE_REQUEST_STATE_COMPLETED);
1410 break;
1413 return SCI_SUCCESS;
/**
 * scic_sds_stp_request_soft_reset_await_d2h_frame_handler()
 * @sci_req: the request for which a frame has been received.
 * @frame_index: the index of the frame that has been received.
 *
 * This method processes frames received from the target while waiting for a
 * device to host register FIS.  If a non-register FIS is received during
 * this time, it is treated as a protocol violation from an IO perspective.
 * The frame is always released back to the controller.
 *
 * Return: the status of the frame header lookup; an early error return means
 * the frame header could not be retrieved.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	u32 *frame_buffer;
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_controller *scic = sci_req->owning_controller;

	status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
							       frame_index,
							       (void **)&frame_header);
	if (status != SCI_SUCCESS) {
		dev_err(scic_to_dev(scic),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);
		return status;
	}

	switch (frame_header->fis_type) {
	case FIS_REGD2H:
		scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
							      frame_index,
							      (void **)&frame_buffer);

		scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
						       frame_header,
						       frame_buffer);

		/* The command has completed with error */
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;

	default:
		dev_warn(scic_to_dev(scic),
			 "%s: IO Request:0x%p Frame Id:%d protocol "
			 "violation occurred\n", __func__, stp_req,
			 frame_index);

		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
					    SCI_FAILURE_PROTOCOL_VIOLATION);
		break;
	}

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Frame has been decoded return it to the controller */
	scic_sds_controller_release_frame(scic, frame_index);

	return status;
}
1485 /* --------------------------------------------------------------------------- */
/*
 * Per-substate handler dispatch for the soft-reset started substate machine.
 */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	}
};
1502 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1503 void *object)
1505 struct scic_sds_request *sci_req = object;
1507 SET_STATE_HANDLER(
1508 sci_req,
1509 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1510 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1513 scic_sds_remote_device_set_working_request(
1514 sci_req->target_device, sci_req
/**
 * scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter()
 * @object: opaque pointer to the scic_sds_request entering the substate.
 *
 * Clear the SRST bit in the H2D FIS and the control-frame bit in the task
 * context, then continue the IO so the de-asserting FIS is transmitted.
 * The substate handlers are only installed when the continue succeeds.
 * NOTE(review): a failure from scic_controller_continue_io() is silently
 * dropped here - confirm the caller handles that path.
 */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
}
1545 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1546 void *object)
1548 struct scic_sds_request *sci_req = object;
1550 SET_STATE_HANDLER(
1551 sci_req,
1552 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1553 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
/* Entry actions for each soft-reset started substate. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	}
};
1569 enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
1571 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1573 scic_sds_stp_non_ncq_request_construct(sci_req);
1575 /* Build the STP task context structure */
1576 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
1578 sci_base_state_machine_construct(&sci_req->started_substate_machine,
1579 sci_req,
1580 scic_sds_stp_request_started_soft_reset_substate_table,
1581 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
1583 return SCI_SUCCESS;