isci: Convert of sci_ssp_response_iu to ssp_response_iu
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / scsi / isci / core / scic_sds_stp_request.c
blob8569dba6c68bfd41c8efce4797b5d41e2419f0ca
1 /*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
5 * GPL LICENSE SUMMARY
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
24 * BSD LICENSE
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
56 #include <scsi/sas.h>
57 #include "sas.h"
58 #include "sci_base_state.h"
59 #include "sci_base_state_machine.h"
60 #include "scic_io_request.h"
61 #include "scic_sds_controller.h"
62 #include "remote_device.h"
63 #include "scic_sds_request.h"
64 #include "scic_sds_stp_pio_request.h"
65 #include "scic_sds_stp_request.h"
66 #include "scic_sds_unsolicited_frame_control.h"
67 #include "sci_environment.h"
68 #include "sci_util.h"
69 #include "scu_completion_codes.h"
70 #include "scu_event_codes.h"
71 #include "scu_task_context.h"
/*
 * Per-request memory layout carved out of one contiguous allocation:
 *   struct scic_sds_stp_request | H2D reg FIS | D2H response | task context | SGL pairs
 * Each macro below computes the address of one sub-buffer from the start of
 * the previous one.
 */

/**
 * scic_sds_stp_request_get_h2d_reg_buffer() - address of the STP
 * host-to-device register FIS buffer, placed directly after the request
 * object itself.
 */
#define scic_sds_stp_request_get_h2d_reg_buffer(memory) \
	((struct host_to_dev_fis *)(\
		((char *)(memory)) + sizeof(struct scic_sds_stp_request) \
	))

/**
 * scic_sds_stp_request_get_response_buffer() - address of the device-to-host
 * response FIS buffer, placed directly after the H2D register FIS.
 */
#define scic_sds_stp_request_get_response_buffer(memory) \
	((struct dev_to_host_fis *)(\
		((char *)(scic_sds_stp_request_get_h2d_reg_buffer(memory))) \
		+ sizeof(struct host_to_dev_fis) \
	))

/**
 * scic_sds_stp_request_get_task_context_buffer() - address of the SCU task
 * context buffer, placed SSP_RESP_IU_MAX_SIZE bytes after the response
 * buffer.
 */
#define scic_sds_stp_request_get_task_context_buffer(memory) \
	((struct scu_task_context *)(\
		((char *)(scic_sds_stp_request_get_response_buffer(memory))) \
		+ SSP_RESP_IU_MAX_SIZE \
	))

/**
 * scic_sds_stp_request_get_sgl_element_buffer() - address of the SGL element
 * pair array, placed directly after the task context buffer.
 */
#define scic_sds_stp_request_get_sgl_element_buffer(memory) \
	((struct scu_sgl_element_pair *)(\
		((char *)(scic_sds_stp_request_get_task_context_buffer(memory))) \
		+ sizeof(struct scu_task_context) \
	))
123 * This method return the memory space required for STP PIO requests. u32
125 u32 scic_sds_stp_request_get_object_size(void)
127 return sizeof(struct scic_sds_stp_request)
128 + sizeof(struct host_to_dev_fis)
129 + sizeof(struct dev_to_host_fis)
130 + sizeof(struct scu_task_context)
131 + SMP_CACHE_BYTES
132 + sizeof(struct scu_sgl_element_pair) * SCU_MAX_SGL_ELEMENT_PAIRS;
135 void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req)
137 struct scic_sds_stp_request *stp_req =
138 container_of(sci_req, typeof(*stp_req), parent);
140 sci_req->command_buffer = scic_sds_stp_request_get_h2d_reg_buffer(stp_req);
141 sci_req->response_buffer = scic_sds_stp_request_get_response_buffer(stp_req);
142 sci_req->sgl_element_pair_buffer = scic_sds_stp_request_get_sgl_element_buffer(stp_req);
143 sci_req->sgl_element_pair_buffer = PTR_ALIGN(sci_req->sgl_element_pair_buffer,
144 sizeof(struct scu_sgl_element_pair));
146 if (sci_req->was_tag_assigned_by_user == false) {
147 sci_req->task_context_buffer =
148 scic_sds_stp_request_get_task_context_buffer(stp_req);
149 sci_req->task_context_buffer = PTR_ALIGN(sci_req->task_context_buffer,
150 SMP_CACHE_BYTES);
155 * This method is will fill in the SCU Task Context for any type of SATA
156 * request. This is called from the various SATA constructors.
157 * @sci_req: The general IO request object which is to be used in
158 * constructing the SCU task context.
159 * @task_context: The buffer pointer for the SCU task context which is being
160 * constructed.
162 * The general io request construction is complete. The buffer assignment for
163 * the command buffer is complete. none Revisit task context construction to
164 * determine what is common for SSP/SMP/STP task context structures.
166 static void scu_sata_reqeust_construct_task_context(
167 struct scic_sds_request *sds_request,
168 struct scu_task_context *task_context)
170 dma_addr_t dma_addr;
171 struct scic_sds_controller *controller;
172 struct scic_sds_remote_device *target_device;
173 struct scic_sds_port *target_port;
175 controller = scic_sds_request_get_controller(sds_request);
176 target_device = scic_sds_request_get_device(sds_request);
177 target_port = scic_sds_request_get_port(sds_request);
179 /* Fill in the TC with the its required data */
180 task_context->abort = 0;
181 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
182 task_context->initiator_request = 1;
183 task_context->connection_rate = target_device->connection_rate;
184 task_context->protocol_engine_index =
185 scic_sds_controller_get_protocol_engine_group(controller);
186 task_context->logical_port_index =
187 scic_sds_port_get_index(target_port);
188 task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
189 task_context->valid = SCU_TASK_CONTEXT_VALID;
190 task_context->context_type = SCU_TASK_CONTEXT_TYPE;
192 task_context->remote_node_index =
193 scic_sds_remote_device_get_index(sds_request->target_device);
194 task_context->command_code = 0;
196 task_context->link_layer_control = 0;
197 task_context->do_not_dma_ssp_good_response = 1;
198 task_context->strict_ordering = 0;
199 task_context->control_frame = 0;
200 task_context->timeout_enable = 0;
201 task_context->block_guard_enable = 0;
203 task_context->address_modifier = 0;
204 task_context->task_phase = 0x01;
206 task_context->ssp_command_iu_length =
207 (sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);
209 /* Set the first word of the H2D REG FIS */
210 task_context->type.words[0] = *(u32 *)sds_request->command_buffer;
212 if (sds_request->was_tag_assigned_by_user) {
214 * Build the task context now since we have already read
215 * the data
217 sds_request->post_context =
218 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
219 (scic_sds_controller_get_protocol_engine_group(
220 controller) <<
221 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
222 (scic_sds_port_get_index(target_port) <<
223 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
224 scic_sds_io_tag_get_index(sds_request->io_tag));
225 } else {
227 * Build the task context now since we have already read
228 * the data.
229 * I/O tag index is not assigned because we have to wait
230 * until we get a TCi.
232 sds_request->post_context =
233 (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
234 (scic_sds_controller_get_protocol_engine_group(
235 controller) <<
236 SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
237 (scic_sds_port_get_index(target_port) <<
238 SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
242 * Copy the physical address for the command buffer to the SCU Task
243 * Context. We must offset the command buffer by 4 bytes because the
244 * first 4 bytes are transfered in the body of the TC.
246 dma_addr =
247 scic_io_request_get_dma_addr(sds_request,
248 (char *)sds_request->
249 command_buffer +
250 sizeof(u32));
252 task_context->command_iu_upper = upper_32_bits(dma_addr);
253 task_context->command_iu_lower = lower_32_bits(dma_addr);
255 /* SATA Requests do not have a response buffer */
256 task_context->response_iu_upper = 0;
257 task_context->response_iu_lower = 0;
262 * @sci_req:
264 * This method will perform any general sata request construction. What part of
265 * SATA IO request construction is general? none
267 static void scic_sds_stp_non_ncq_request_construct(
268 struct scic_sds_request *sci_req)
270 sci_req->has_started_substate_machine = true;
275 * @sci_req: This parameter specifies the request to be constructed as an
276 * optimized request.
277 * @optimized_task_type: This parameter specifies whether the request is to be
278 * an UDMA request or a NCQ request. - A value of 0 indicates UDMA. - A
279 * value of 1 indicates NCQ.
281 * This method will perform request construction common to all types of STP
282 * requests that are optimized by the silicon (i.e. UDMA, NCQ). This method
283 * returns an indication as to whether the construction was successful.
285 static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
286 u8 optimized_task_type,
287 u32 len,
288 enum dma_data_direction dir)
290 struct scu_task_context *task_context = sci_req->task_context_buffer;
292 /* Build the STP task context structure */
293 scu_sata_reqeust_construct_task_context(sci_req, task_context);
295 /* Copy over the SGL elements */
296 scic_sds_request_build_sgl(sci_req);
298 /* Copy over the number of bytes to be transfered */
299 task_context->transfer_length_bytes = len;
301 if (dir == DMA_TO_DEVICE) {
303 * The difference between the DMA IN and DMA OUT request task type
304 * values are consistent with the difference between FPDMA READ
305 * and FPDMA WRITE values. Add the supplied task type parameter
306 * to this difference to set the task type properly for this
307 * DATA OUT (WRITE) case. */
308 task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
309 - SCU_TASK_TYPE_DMA_IN);
310 } else {
312 * For the DATA IN (READ) case, simply save the supplied
313 * optimized task type. */
314 task_context->task_type = optimized_task_type;
320 * @sci_req: This parameter specifies the request to be constructed.
322 * This method will construct the STP UDMA request and its associated TC data.
323 * This method returns an indication as to whether the construction was
324 * successful. SCI_SUCCESS Currently this method always returns this value.
326 enum sci_status scic_sds_stp_ncq_request_construct(struct scic_sds_request *sci_req,
327 u32 len,
328 enum dma_data_direction dir)
330 scic_sds_stp_optimized_request_construct(sci_req,
331 SCU_TASK_TYPE_FPDMAQ_READ,
332 len, dir);
333 return SCI_SUCCESS;
337 * scu_stp_raw_request_construct_task_context -
338 * @sci_req: This parameter specifies the STP request object for which to
339 * construct a RAW command frame task context.
340 * @task_context: This parameter specifies the SCU specific task context buffer
341 * to construct.
343 * This method performs the operations common to all SATA/STP requests
344 * utilizing the raw frame method. none
346 static void scu_stp_raw_request_construct_task_context(
347 struct scic_sds_stp_request *sci_req,
348 struct scu_task_context *task_context)
350 scu_sata_reqeust_construct_task_context(&sci_req->parent, task_context);
352 task_context->control_frame = 0;
353 task_context->priority = SCU_TASK_PRIORITY_NORMAL;
354 task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
355 task_context->type.stp.fis_type = FIS_REGH2D;
356 task_context->transfer_length_bytes =
357 sizeof(struct host_to_dev_fis) - sizeof(u32);
360 void scic_stp_io_request_set_ncq_tag(
361 struct scic_sds_request *req,
362 u16 ncq_tag)
365 * @note This could be made to return an error to the user if the user
366 * attempts to set the NCQ tag in the wrong state.
368 req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
372 void *scic_stp_io_request_get_h2d_reg_address(
373 struct scic_sds_request *req)
375 return req->command_buffer;
379 void *scic_stp_io_request_get_d2h_reg_address(
380 struct scic_sds_request *req)
382 return &((struct scic_sds_stp_request *)req)->d2h_reg_fis;
387 * @sci_req:
389 * Get the next SGL element from the request. - Check on which SGL element pair
390 * we are working - if working on SLG pair element A - advance to element B -
391 * else - check to see if there are more SGL element pairs for this IO request
392 * - if there are more SGL element pairs - advance to the next pair and return
393 * element A struct scu_sgl_element*
395 static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
397 struct scu_sgl_element *current_sgl;
398 struct scic_sds_request *sci_req = &stp_req->parent;
399 struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;
401 if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
402 if (pio_sgl->sgl_pair->B.address_lower == 0 &&
403 pio_sgl->sgl_pair->B.address_upper == 0) {
404 current_sgl = NULL;
405 } else {
406 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
407 current_sgl = &pio_sgl->sgl_pair->B;
409 } else {
410 if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
411 pio_sgl->sgl_pair->next_pair_upper == 0) {
412 current_sgl = NULL;
413 } else {
414 u64 phys_addr;
416 phys_addr = pio_sgl->sgl_pair->next_pair_upper;
417 phys_addr <<= 32;
418 phys_addr |= pio_sgl->sgl_pair->next_pair_lower;
420 pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
421 pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
422 current_sgl = &pio_sgl->sgl_pair->A;
426 return current_sgl;
431 * @sci_req:
432 * @completion_code:
434 * This method processes a TC completion. The expected TC completion is for
435 * the transmission of the H2D register FIS containing the SATA/STP non-data
436 * request. This method always successfully processes the TC completion.
437 * SCI_SUCCESS This value is always returned.
439 static enum sci_status scic_sds_stp_request_non_data_await_h2d_tc_completion_handler(
440 struct scic_sds_request *sci_req,
441 u32 completion_code)
443 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
444 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
445 scic_sds_request_set_status(
446 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
449 sci_base_state_machine_change_state(
450 &sci_req->started_substate_machine,
451 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
453 break;
455 default:
457 * All other completion status cause the IO to be complete. If a NAK
458 * was received, then it is up to the user to retry the request. */
459 scic_sds_request_set_status(
460 sci_req,
461 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
462 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
465 sci_base_state_machine_change_state(
466 &sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
467 break;
470 return SCI_SUCCESS;
475 * @request: This parameter specifies the request for which a frame has been
476 * received.
477 * @frame_index: This parameter specifies the index of the frame that has been
478 * received.
480 * This method processes frames received from the target while waiting for a
481 * device to host register FIS. If a non-register FIS is received during this
482 * time, it is treated as a protocol violation from an IO perspective. Indicate
483 * if the received frame was processed successfully.
485 static enum sci_status scic_sds_stp_request_non_data_await_d2h_frame_handler(
486 struct scic_sds_request *sci_req,
487 u32 frame_index)
489 enum sci_status status;
490 struct dev_to_host_fis *frame_header;
491 u32 *frame_buffer;
492 struct scic_sds_stp_request *stp_req =
493 container_of(sci_req, typeof(*stp_req), parent);
495 status = scic_sds_unsolicited_frame_control_get_header(
496 &stp_req->parent.owning_controller->uf_control,
497 frame_index,
498 (void **)&frame_header);
500 if (status == SCI_SUCCESS) {
501 switch (frame_header->fis_type) {
502 case FIS_REGD2H:
503 scic_sds_unsolicited_frame_control_get_buffer(
504 &stp_req->parent.owning_controller->uf_control,
505 frame_index,
506 (void **)&frame_buffer);
508 scic_sds_controller_copy_sata_response(
509 &stp_req->d2h_reg_fis,
510 (u32 *)frame_header,
511 frame_buffer);
513 /* The command has completed with error */
514 scic_sds_request_set_status(
515 &stp_req->parent,
516 SCU_TASK_DONE_CHECK_RESPONSE,
517 SCI_FAILURE_IO_RESPONSE_VALID);
518 break;
520 default:
521 dev_warn(scic_to_dev(sci_req->owning_controller),
522 "%s: IO Request:0x%p Frame Id:%d protocol "
523 "violation occurred\n",
524 __func__, stp_req, frame_index);
526 scic_sds_request_set_status(
527 &stp_req->parent,
528 SCU_TASK_DONE_UNEXP_FIS,
529 SCI_FAILURE_PROTOCOL_VIOLATION);
530 break;
533 sci_base_state_machine_change_state(
534 &stp_req->parent.state_machine,
535 SCI_BASE_REQUEST_STATE_COMPLETED);
537 /* Frame has been decoded return it to the controller */
538 scic_sds_controller_release_frame(
539 stp_req->parent.owning_controller, frame_index);
540 } else
541 dev_err(scic_to_dev(sci_req->owning_controller),
542 "%s: SCIC IO Request 0x%p could not get frame header "
543 "for frame index %d, status %x\n",
544 __func__, stp_req, frame_index, status);
546 return status;
549 /* --------------------------------------------------------------------------- */
551 static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_non_data_substate_handler_table[] = {
552 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
553 .abort_handler = scic_sds_request_started_state_abort_handler,
554 .tc_completion_handler = scic_sds_stp_request_non_data_await_h2d_tc_completion_handler,
556 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
557 .abort_handler = scic_sds_request_started_state_abort_handler,
558 .frame_handler = scic_sds_stp_request_non_data_await_d2h_frame_handler,
562 static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
563 void *object)
565 struct scic_sds_request *sci_req = object;
567 SET_STATE_HANDLER(
568 sci_req,
569 scic_sds_stp_request_started_non_data_substate_handler_table,
570 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
573 scic_sds_remote_device_set_working_request(
574 sci_req->target_device, sci_req
578 static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
580 struct scic_sds_request *sci_req = object;
582 SET_STATE_HANDLER(
583 sci_req,
584 scic_sds_stp_request_started_non_data_substate_handler_table,
585 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
589 /* --------------------------------------------------------------------------- */
591 static const struct sci_base_state scic_sds_stp_request_started_non_data_substate_table[] = {
592 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
593 .enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
595 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
596 .enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
600 enum sci_status scic_sds_stp_non_data_request_construct(struct scic_sds_request *sci_req)
602 struct scic_sds_stp_request *stp_req =
603 container_of(sci_req, typeof(*stp_req), parent);
605 scic_sds_stp_non_ncq_request_construct(sci_req);
607 /* Build the STP task context structure */
608 scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);
610 sci_base_state_machine_construct(&sci_req->started_substate_machine,
611 sci_req,
612 scic_sds_stp_request_started_non_data_substate_table,
613 SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE);
615 return SCI_SUCCESS;
/* 1K (0x400 bytes) is the maximum data payload of a single SCU frame */
#define SCU_MAX_FRAME_BUFFER_SIZE 0x400
622 * @sci_req:
623 * @length:
625 * This function will transmit DATA_FIS from (current sgl + offset) for input
626 * parameter length. current sgl and offset is alreay stored in the IO request
627 * enum sci_status
630 static enum sci_status scic_sds_stp_request_pio_data_out_trasmit_data_frame(
631 struct scic_sds_request *sci_req,
632 u32 length)
634 struct scic_sds_stp_request *stp_req =
635 container_of(sci_req, typeof(*stp_req), parent);
636 struct scu_sgl_element *current_sgl;
639 * Recycle the TC and reconstruct it for sending out DATA FIS containing
640 * for the data from current_sgl+offset for the input length */
641 struct scu_task_context *task_context = scic_sds_controller_get_task_context_buffer(
642 sci_req->owning_controller,
643 sci_req->io_tag
646 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
647 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
648 else
649 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
651 /* update the TC */
652 task_context->command_iu_upper = current_sgl->address_upper;
653 task_context->command_iu_lower = current_sgl->address_lower;
654 task_context->transfer_length_bytes = length;
655 task_context->type.stp.fis_type = FIS_DATA;
657 /* send the new TC out. */
658 return scic_controller_continue_io(sci_req);
663 * @sci_req:
665 * enum sci_status
667 static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(
668 struct scic_sds_request *sci_req)
671 struct scu_sgl_element *current_sgl;
672 u32 sgl_offset;
673 u32 remaining_bytes_in_current_sgl = 0;
674 enum sci_status status = SCI_SUCCESS;
676 struct scic_sds_stp_request *stp_req =
677 container_of(sci_req, typeof(*stp_req), parent);
679 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
681 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
682 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
683 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
684 } else {
685 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
686 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
690 if (stp_req->type.pio.pio_transfer_bytes > 0) {
691 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
692 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
693 status = scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
694 if (status == SCI_SUCCESS) {
695 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
697 /* update the current sgl, sgl_offset and save for future */
698 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
699 sgl_offset = 0;
701 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
702 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
703 scic_sds_stp_request_pio_data_out_trasmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
705 if (status == SCI_SUCCESS) {
706 /* Sgl offset will be adjusted and saved for future */
707 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
708 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
709 stp_req->type.pio.pio_transfer_bytes = 0;
714 if (status == SCI_SUCCESS) {
715 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
718 return status;
723 * @stp_request: The request that is used for the SGL processing.
724 * @data_buffer: The buffer of data to be copied.
725 * @length: The length of the data transfer.
727 * Copy the data from the buffer for the length specified to the IO reqeust SGL
728 * specified data region. enum sci_status
730 static enum sci_status
731 scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
732 u8 *data_buf, u32 len)
734 struct scic_sds_request *sci_req;
735 struct isci_request *ireq;
736 u8 *src_addr;
737 int copy_len;
738 struct sas_task *task;
739 struct scatterlist *sg;
740 void *kaddr;
741 int total_len = len;
743 sci_req = &stp_req->parent;
744 ireq = scic_sds_request_get_user_request(sci_req);
745 task = isci_request_access_task(ireq);
746 src_addr = data_buf;
748 if (task->num_scatter > 0) {
749 sg = task->scatter;
751 while (total_len > 0) {
752 struct page *page = sg_page(sg);
754 copy_len = min_t(int, total_len, sg_dma_len(sg));
755 kaddr = kmap_atomic(page, KM_IRQ0);
756 memcpy(kaddr + sg->offset, src_addr, copy_len);
757 kunmap_atomic(kaddr, KM_IRQ0);
758 total_len -= copy_len;
759 src_addr += copy_len;
760 sg = sg_next(sg);
762 } else {
763 BUG_ON(task->total_xfer_len < total_len);
764 memcpy(task->scatter, src_addr, total_len);
767 return SCI_SUCCESS;
772 * @sci_req: The PIO DATA IN request that is to receive the data.
773 * @data_buffer: The buffer to copy from.
775 * Copy the data buffer to the io request data region. enum sci_status
777 static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
778 struct scic_sds_stp_request *sci_req,
779 u8 *data_buffer)
781 enum sci_status status;
784 * If there is less than 1K remaining in the transfer request
785 * copy just the data for the transfer */
786 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
787 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
788 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
790 if (status == SCI_SUCCESS)
791 sci_req->type.pio.pio_transfer_bytes = 0;
792 } else {
793 /* We are transfering the whole frame so copy */
794 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
795 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
797 if (status == SCI_SUCCESS)
798 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
801 return status;
806 * @sci_req:
807 * @completion_code:
809 * enum sci_status
811 static enum sci_status scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler(
812 struct scic_sds_request *sci_req,
813 u32 completion_code)
815 enum sci_status status = SCI_SUCCESS;
817 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
818 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
819 scic_sds_request_set_status(
820 sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
823 sci_base_state_machine_change_state(
824 &sci_req->started_substate_machine,
825 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
827 break;
829 default:
831 * All other completion status cause the IO to be complete. If a NAK
832 * was received, then it is up to the user to retry the request. */
833 scic_sds_request_set_status(
834 sci_req,
835 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
836 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
839 sci_base_state_machine_change_state(
840 &sci_req->state_machine,
841 SCI_BASE_REQUEST_STATE_COMPLETED
843 break;
846 return status;
851 * @sci_req:
852 * @frame_index:
854 * enum sci_status
856 static enum sci_status scic_sds_stp_request_pio_await_frame_frame_handler(
857 struct scic_sds_request *sci_req,
858 u32 frame_index)
860 enum sci_status status;
861 struct dev_to_host_fis *frame_header;
862 u32 *frame_buffer;
863 struct scic_sds_stp_request *stp_req = container_of(sci_req, typeof(*stp_req), parent);
864 struct isci_request *ireq = sci_req->ireq;
865 struct sas_task *task = isci_request_access_task(ireq);
867 status = scic_sds_unsolicited_frame_control_get_header(
868 &(stp_req->parent.owning_controller->uf_control),
869 frame_index,
870 (void **)&frame_header);
872 if (status == SCI_SUCCESS) {
873 switch (frame_header->fis_type) {
874 case FIS_PIO_SETUP:
875 /* Get from the frame buffer the PIO Setup Data */
876 scic_sds_unsolicited_frame_control_get_buffer(
877 &(stp_req->parent.owning_controller->uf_control),
878 frame_index,
879 (void **)&frame_buffer);
881 /* Get the data from the PIO Setup The SCU Hardware
882 * returns first word in the frame_header and the rest
883 * of the data is in the frame buffer so we need to back
884 * up one dword
887 /* transfer_count: first 16bits in the 4th dword */
888 stp_req->type.pio.pio_transfer_bytes =
889 frame_buffer[3] & 0xffff;
891 /* ending_status: 4th byte in the 3rd dword */
892 stp_req->type.pio.ending_status =
893 (frame_buffer[2] >> 24) & 0xff;
895 scic_sds_controller_copy_sata_response(
896 &stp_req->d2h_reg_fis,
897 (u32 *)frame_header,
898 frame_buffer);
900 stp_req->d2h_reg_fis.status =
901 stp_req->type.pio.ending_status;
903 /* The next state is dependent on whether the
904 * request was PIO Data-in or Data out
906 if (task->data_dir == DMA_FROM_DEVICE) {
907 sci_base_state_machine_change_state(
908 &stp_req->parent.started_substate_machine,
909 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
910 } else if (task->data_dir == DMA_TO_DEVICE) {
911 /* Transmit data */
912 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
913 if (status == SCI_SUCCESS) {
914 sci_base_state_machine_change_state(
915 &stp_req->parent.started_substate_machine,
916 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
919 break;
921 case FIS_SETDEVBITS:
922 sci_base_state_machine_change_state(
923 &stp_req->parent.started_substate_machine,
924 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
925 break;
927 case FIS_REGD2H:
928 if ((frame_header->status & ATA_BUSY) == 0) {
929 scic_sds_unsolicited_frame_control_get_buffer(
930 &(stp_req->parent.owning_controller->uf_control),
931 frame_index,
932 (void **)&frame_buffer);
934 scic_sds_controller_copy_sata_response(
935 &stp_req->d2h_reg_fis,
936 (u32 *)frame_header,
937 frame_buffer);
939 scic_sds_request_set_status(
940 &stp_req->parent,
941 SCU_TASK_DONE_CHECK_RESPONSE,
942 SCI_FAILURE_IO_RESPONSE_VALID);
944 sci_base_state_machine_change_state(
945 &stp_req->parent.state_machine,
946 SCI_BASE_REQUEST_STATE_COMPLETED);
947 } else {
948 /* Now why is the drive sending a D2H Register
949 * FIS when it is still busy?
950 * Do nothing since we are still in the right
951 * state.
953 dev_dbg(scic_to_dev(sci_req->owning_controller),
954 "%s: SCIC PIO Request 0x%p received "
955 "D2H Register FIS with BSY status "
956 "0x%x\n",
957 __func__,
958 stp_req,
959 frame_header->status);
961 break;
963 default:
964 /* FIXME: what do we do here? */
965 break;
968 /* Frame is decoded return it to the controller */
969 scic_sds_controller_release_frame(
970 stp_req->parent.owning_controller,
971 frame_index);
972 } else
973 dev_err(scic_to_dev(sci_req->owning_controller),
974 "%s: SCIC IO Request 0x%p could not get frame header "
975 "for frame index %d, status %x\n",
976 __func__, stp_req, frame_index, status);
978 return status;
/**
 * scic_sds_stp_request_pio_data_in_await_data_frame_handler() - process an
 *    unsolicited frame received while a PIO data-in request waits for a
 *    DATA FIS.
 * @sci_req: the request for which the frame was received.
 * @frame_index: index of the frame in the controller's unsolicited frame pool.
 *
 * Returns an enum sci_status propagated from the header lookup and, when a
 * DATA FIS is copied, from the data-copy step.
 */
static enum sci_status scic_sds_stp_request_pio_data_in_await_data_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;
	struct dev_to_host_fis *frame_header;
	struct sata_fis_data *frame_buffer;
	struct scic_sds_stp_request *stp_req =
		container_of(sci_req, typeof(*stp_req), parent);

	status = scic_sds_unsolicited_frame_control_get_header(
		&(stp_req->parent.owning_controller->uf_control),
		frame_index,
		(void **)&frame_header);

	if (status == SCI_SUCCESS) {
		if (frame_header->fis_type == FIS_DATA) {
			if (stp_req->type.pio.request_current.sgl_pair ==
					NULL) {
				/* No SGL to copy into: save the frame index
				 * for the user and treat the transfer for
				 * this request as complete. */
				stp_req->parent.saved_rx_frame_index =
					frame_index;
				stp_req->type.pio.pio_transfer_bytes = 0;
			} else {
				/* NOTE(review): the status returned by
				 * get_buffer is immediately overwritten by
				 * the copy-data status below — confirm this
				 * is intentional. */
				status = scic_sds_unsolicited_frame_control_get_buffer(
					&(stp_req->parent.owning_controller->uf_control),
					frame_index,
					(void **)&frame_buffer);

				status = scic_sds_stp_request_pio_data_in_copy_data(
					stp_req,
					(u8 *)frame_buffer);

				/* Frame is decoded return it to the controller */
				scic_sds_controller_release_frame(
					stp_req->parent.owning_controller,
					frame_index);
			}

			/*
			 * Check for the end of the transfer, are there more
			 * bytes remaining for this data transfer
			 */
			if ((status == SCI_SUCCESS) &&
			    (stp_req->type.pio.pio_transfer_bytes == 0)) {
				if ((stp_req->type.pio.ending_status &
				     ATA_BUSY) == 0) {
					/* Device no longer busy: complete the
					 * IO with the saved ending status. */
					scic_sds_request_set_status(
						&stp_req->parent,
						SCU_TASK_DONE_CHECK_RESPONSE,
						SCI_FAILURE_IO_RESPONSE_VALID);

					sci_base_state_machine_change_state(
						&stp_req->parent.state_machine,
						SCI_BASE_REQUEST_STATE_COMPLETED);
				} else {
					/* Device still busy: go back to
					 * waiting for the next PIO Setup or
					 * D2H Register FIS. */
					sci_base_state_machine_change_state(
						&sci_req->started_substate_machine,
						SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
				}
			}
		} else {
			/* A non-DATA FIS while waiting for data is a protocol
			 * violation from this request's point of view. */
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: SCIC PIO Request 0x%p received frame %d "
				"with fis type 0x%02x when expecting a data "
				"fis.\n",
				__func__,
				stp_req,
				frame_index,
				frame_header->fis_type);

			scic_sds_request_set_status(
				&stp_req->parent,
				SCU_TASK_DONE_GOOD,
				SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);

			sci_base_state_machine_change_state(
				&stp_req->parent.state_machine,
				SCI_BASE_REQUEST_STATE_COMPLETED);

			/* Frame is decoded return it to the controller */
			scic_sds_controller_release_frame(
				stp_req->parent.owning_controller,
				frame_index);
		}
	} else
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: SCIC IO Request 0x%p could not get frame header "
			"for frame index %d, status %x\n",
			__func__, stp_req, frame_index, status);

	return status;
}
/**
 * scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler()
 *    - process a TC completion while a PIO data-out request is transmitting
 *    data frames.
 * @sci_req: the request receiving the task context completion.
 * @completion_code: raw completion code from the SCU.
 *
 * Returns an enum sci_status; a transmit failure status is propagated from
 * scic_sds_stp_request_pio_data_out_transmit_data().
 */
static enum sci_status scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler(

	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	enum sci_status status = SCI_SUCCESS;
	bool all_frames_transferred = false;
	struct scic_sds_stp_request *stp_req =
		container_of(sci_req, typeof(*stp_req), parent);

	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* Transmit data */
		if (stp_req->type.pio.pio_transfer_bytes != 0) {
			status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
			if (status == SCI_SUCCESS) {
				if (stp_req->type.pio.pio_transfer_bytes == 0)
					all_frames_transferred = true;
			}
		} else if (stp_req->type.pio.pio_transfer_bytes == 0) {
			/*
			 * this will happen if the all data is written at the
			 * first time after the pio setup fis is received
			 */
			all_frames_transferred = true;
		}

		/* all data transferred. */
		if (all_frames_transferred) {
			/*
			 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_FRAME_SUBSTATE
			 * and wait for PIO_SETUP fis / or D2H Reg fis. */
			sci_base_state_machine_change_state(
				&sci_req->started_substate_machine,
				SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
				);
		}
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine,
			SCI_BASE_REQUEST_STATE_COMPLETED
			);
		break;
	}

	return status;
}
1150 * @request: This is the request which is receiving the event.
1151 * @event_code: This is the event code that the request on which the request is
1152 * expected to take action.
1154 * This method will handle any link layer events while waiting for the data
1155 * frame. enum sci_status SCI_SUCCESS SCI_FAILURE
1157 static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
1158 struct scic_sds_request *request,
1159 u32 event_code)
1161 enum sci_status status;
1163 switch (scu_get_event_specifier(event_code)) {
1164 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
1166 * We are waiting for data and the SCU has R_ERR the data frame.
1167 * Go back to waiting for the D2H Register FIS */
1168 sci_base_state_machine_change_state(
1169 &request->started_substate_machine,
1170 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1173 status = SCI_SUCCESS;
1174 break;
1176 default:
1177 dev_err(scic_to_dev(request->owning_controller),
1178 "%s: SCIC PIO Request 0x%p received unexpected "
1179 "event 0x%08x\n",
1180 __func__, request, event_code);
1182 /* / @todo Should we fail the PIO request when we get an unexpected event? */
1183 status = SCI_FAILURE;
1184 break;
1187 return status;
/* --------------------------------------------------------------------------- */

/* Handler dispatch for PIO requests, indexed by
 * SCIC_SDS_STP_REQUEST_STARTED_PIO_* substate. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_pio_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_await_h2d_completion_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_pio_await_frame_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
		.frame_handler = scic_sds_stp_request_pio_data_in_await_data_frame_handler
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_pio_data_out_await_data_transmit_completion_tc_completion_handler,
	}
};
1212 static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
1213 void *object)
1215 struct scic_sds_request *sci_req = object;
1217 SET_STATE_HANDLER(
1218 sci_req,
1219 scic_sds_stp_request_started_pio_substate_handler_table,
1220 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
1223 scic_sds_remote_device_set_working_request(
1224 sci_req->target_device, sci_req);
1227 static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
1229 struct scic_sds_request *sci_req = object;
1231 SET_STATE_HANDLER(
1232 sci_req,
1233 scic_sds_stp_request_started_pio_substate_handler_table,
1234 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1238 static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
1239 void *object)
1241 struct scic_sds_request *sci_req = object;
1243 SET_STATE_HANDLER(
1244 sci_req,
1245 scic_sds_stp_request_started_pio_substate_handler_table,
1246 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
1250 static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
1251 void *object)
1253 struct scic_sds_request *sci_req = object;
1255 SET_STATE_HANDLER(
1256 sci_req,
1257 scic_sds_stp_request_started_pio_substate_handler_table,
1258 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
/* --------------------------------------------------------------------------- */

/* State table for the PIO started-substate machine: maps each substate to
 * its enter routine. */
static const struct sci_base_state scic_sds_stp_request_started_pio_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	}
};
/**
 * scic_sds_stp_pio_request_construct() - construct a SATA PIO request.
 * @sci_req: the request to construct.
 * @copy_rx_frame: when true, received data is copied into the request's SGL;
 *    when false, the frame is saved for the user instead (sgl_pair == NULL).
 *
 * Builds the raw STP task context, resets the PIO bookkeeping fields, and
 * starts the PIO substate machine. Returns SCI_SUCCESS.
 */
enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req =
		container_of(sci_req, typeof(*stp_req), parent);
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scic_sds_stp_non_ncq_request_construct(sci_req);

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_pio_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}
/* Record the SCU/SCI completion status pair on the request and transition
 * its base state machine to the completed state. */
static void scic_sds_stp_request_udma_complete_request(
	struct scic_sds_request *request,
	u32 scu_status,
	enum sci_status sci_status)
{
	scic_sds_request_set_status(request, scu_status, sci_status);
	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
}
1330 * @sci_req:
1331 * @frame_index:
1333 * enum sci_status
1335 static enum sci_status scic_sds_stp_request_udma_general_frame_handler(
1336 struct scic_sds_request *sci_req,
1337 u32 frame_index)
1339 enum sci_status status;
1340 struct dev_to_host_fis *frame_header;
1341 u32 *frame_buffer;
1343 status = scic_sds_unsolicited_frame_control_get_header(
1344 &sci_req->owning_controller->uf_control,
1345 frame_index,
1346 (void **)&frame_header);
1348 if ((status == SCI_SUCCESS) &&
1349 (frame_header->fis_type == FIS_REGD2H)) {
1350 scic_sds_unsolicited_frame_control_get_buffer(
1351 &sci_req->owning_controller->uf_control,
1352 frame_index,
1353 (void **)&frame_buffer);
1355 scic_sds_controller_copy_sata_response(
1356 &((struct scic_sds_stp_request *)sci_req)->d2h_reg_fis,
1357 (u32 *)frame_header,
1358 frame_buffer);
1361 scic_sds_controller_release_frame(
1362 sci_req->owning_controller, frame_index);
1364 return status;
1368 * This method process TC completions while in the state where we are waiting
1369 * for TC completions.
1370 * @sci_req:
1371 * @completion_code:
1373 * enum sci_status
1375 static enum sci_status scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler(
1376 struct scic_sds_request *request,
1377 u32 completion_code)
1379 enum sci_status status = SCI_SUCCESS;
1380 struct scic_sds_stp_request *sci_req = (struct scic_sds_stp_request *)request;
1382 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1383 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1384 scic_sds_stp_request_udma_complete_request(
1385 &sci_req->parent, SCU_TASK_DONE_GOOD, SCI_SUCCESS
1387 break;
1389 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
1390 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
1392 * We must check ther response buffer to see if the D2H Register FIS was
1393 * received before we got the TC completion. */
1394 if (sci_req->d2h_reg_fis.fis_type == FIS_REGD2H) {
1395 scic_sds_remote_device_suspend(
1396 sci_req->parent.target_device,
1397 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))
1400 scic_sds_stp_request_udma_complete_request(
1401 &sci_req->parent,
1402 SCU_TASK_DONE_CHECK_RESPONSE,
1403 SCI_FAILURE_IO_RESPONSE_VALID
1405 } else {
1407 * If we have an error completion status for the TC then we can expect a
1408 * D2H register FIS from the device so we must change state to wait for it */
1409 sci_base_state_machine_change_state(
1410 &sci_req->parent.started_substate_machine,
1411 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
1414 break;
1417 * / @todo Check to see if any of these completion status need to wait for
1418 * / the device to host register fis. */
1419 /* / @todo We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR - this comes only for B0 */
1420 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
1421 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
1422 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
1423 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
1424 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
1425 scic_sds_remote_device_suspend(
1426 sci_req->parent.target_device,
1427 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code))
1429 /* Fall through to the default case */
1430 default:
1431 /* All other completion status cause the IO to be complete. */
1432 scic_sds_stp_request_udma_complete_request(
1433 &sci_req->parent,
1434 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1435 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1437 break;
1440 return status;
/* Frame handler for the UDMA await-D2H-register-FIS substate: copy the
 * response via the general handler, then complete the request. */
static enum sci_status scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler(
	struct scic_sds_request *sci_req,
	u32 frame_index)
{
	enum sci_status status;

	/* Use the general frame handler to copy the response data */
	status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);

	if (status == SCI_SUCCESS) {
		scic_sds_stp_request_udma_complete_request(
			sci_req,
			SCU_TASK_DONE_CHECK_RESPONSE,
			SCI_FAILURE_IO_RESPONSE_VALID
			);
	}

	return status;
}
/* --------------------------------------------------------------------------- */

/* Handler dispatch for UDMA requests, indexed by
 * SCIC_SDS_STP_REQUEST_STARTED_UDMA_* substate. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_udma_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_udma_await_tc_completion_tc_completion_handler,
		.frame_handler = scic_sds_stp_request_udma_general_frame_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_udma_await_d2h_reg_fis_frame_handler,
	}
};
1477 static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
1478 void *object)
1480 struct scic_sds_request *sci_req = object;
1482 SET_STATE_HANDLER(
1483 sci_req,
1484 scic_sds_stp_request_started_udma_substate_handler_table,
1485 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
/**
 * scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter() -
 * This state is entered when there is an TC completion failure. The hardware
 * received an unexpected condition while processing the IO request and now
 * will UF the D2H register FIS to complete the IO.
 */
static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_stp_request_started_udma_substate_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
		);
}
/* --------------------------------------------------------------------------- */

/* State table for the UDMA started-substate machine. */
static const struct sci_base_state scic_sds_stp_request_started_udma_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	}
};
/**
 * scic_sds_stp_udma_request_construct() - construct a SATA UDMA request.
 * @sci_req: the request to construct.
 * @len: transfer length in bytes.
 * @dir: DMA data direction of the transfer.
 *
 * Returns SCI_SUCCESS.
 */
enum sci_status scic_sds_stp_udma_request_construct(struct scic_sds_request *sci_req,
						    u32 len,
						    enum dma_data_direction dir)
{
	scic_sds_stp_non_ncq_request_construct(sci_req);

	/* NOTE(review): SCU_TASK_TYPE_DMA_IN is passed unconditionally while
	 * @dir is forwarded separately — confirm the optimized-request
	 * constructor derives the real task type from @dir. */
	scic_sds_stp_optimized_request_construct(sci_req, SCU_TASK_TYPE_DMA_IN,
						 len, dir);

	sci_base_state_machine_construct(
		&sci_req->started_substate_machine,
		sci_req,
		scic_sds_stp_request_started_udma_substate_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
		);

	return SCI_SUCCESS;
}
/**
 * scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler() -
 * @sci_req: the request receiving the task context completion.
 * @completion_code: raw completion code from the SCU.
 *
 * This method processes a TC completion. The expected TC completion is for
 * the transmission of the H2D register FIS containing the SATA/STP non-data
 * request (with SRST asserted). This method always successfully processes
 * the TC completion. SCI_SUCCESS This value is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		/* SRST asserted successfully; now send the de-asserting
		 * (diagnostic) H2D FIS. */
		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(
			&sci_req->state_machine, SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
/**
 * scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler() -
 * @sci_req: the request receiving the task context completion.
 * @completion_code: raw completion code from the SCU.
 *
 * This method processes a TC completion. The expected TC completion is for
 * the transmission of the H2D register FIS containing the SATA/STP non-data
 * request (with SRST cleared). This method always successfully processes
 * the TC completion. SCI_SUCCESS This value is always returned.
 */
static enum sci_status scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(
			sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS
			);

		/* Both H2D FIS transmissions done; wait for the device's D2H
		 * response frame. */
		sci_base_state_machine_change_state(
			&sci_req->started_substate_machine,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
			);
		break;

	default:
		/*
		 * All other completion status cause the IO to be complete. If a NAK
		 * was received, then it is up to the user to retry the request. */
		scic_sds_request_set_status(
			sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
			);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1628 * @request: This parameter specifies the request for which a frame has been
1629 * received.
1630 * @frame_index: This parameter specifies the index of the frame that has been
1631 * received.
1633 * This method processes frames received from the target while waiting for a
1634 * device to host register FIS. If a non-register FIS is received during this
1635 * time, it is treated as a protocol violation from an IO perspective. Indicate
1636 * if the received frame was processed successfully.
1638 static enum sci_status scic_sds_stp_request_soft_reset_await_d2h_frame_handler(
1639 struct scic_sds_request *request,
1640 u32 frame_index)
1642 enum sci_status status;
1643 struct dev_to_host_fis *frame_header;
1644 u32 *frame_buffer;
1645 struct scic_sds_stp_request *stp_req =
1646 (struct scic_sds_stp_request *)request;
1648 status = scic_sds_unsolicited_frame_control_get_header(
1649 &(stp_req->parent.owning_controller->uf_control),
1650 frame_index,
1651 (void **)&frame_header);
1653 if (status == SCI_SUCCESS) {
1654 switch (frame_header->fis_type) {
1655 case FIS_REGD2H:
1656 scic_sds_unsolicited_frame_control_get_buffer(
1657 &(stp_req->parent.owning_controller->uf_control),
1658 frame_index,
1659 (void **)&frame_buffer);
1661 scic_sds_controller_copy_sata_response(
1662 &stp_req->d2h_reg_fis,
1663 (u32 *)frame_header,
1664 frame_buffer);
1666 /* The command has completed with error */
1667 scic_sds_request_set_status(
1668 &stp_req->parent,
1669 SCU_TASK_DONE_CHECK_RESPONSE,
1670 SCI_FAILURE_IO_RESPONSE_VALID);
1671 break;
1673 default:
1674 dev_warn(scic_to_dev(request->owning_controller),
1675 "%s: IO Request:0x%p Frame Id:%d protocol "
1676 "violation occurred\n",
1677 __func__,
1678 stp_req,
1679 frame_index);
1681 scic_sds_request_set_status(
1682 &stp_req->parent,
1683 SCU_TASK_DONE_UNEXP_FIS,
1684 SCI_FAILURE_PROTOCOL_VIOLATION);
1685 break;
1688 sci_base_state_machine_change_state(
1689 &stp_req->parent.state_machine,
1690 SCI_BASE_REQUEST_STATE_COMPLETED);
1692 /* Frame has been decoded return it to the controller */
1693 scic_sds_controller_release_frame(
1694 stp_req->parent.owning_controller, frame_index);
1695 } else
1696 dev_err(scic_to_dev(request->owning_controller),
1697 "%s: SCIC IO Request 0x%p could not get frame header "
1698 "for frame index %d, status %x\n",
1699 __func__, stp_req, frame_index, status);
1701 return status;
/* --------------------------------------------------------------------------- */

/* Handler dispatch for soft-reset requests, indexed by
 * SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_* substate. */
static const struct scic_sds_io_request_state_handler scic_sds_stp_request_started_soft_reset_substate_handler_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_asserted_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.tc_completion_handler = scic_sds_stp_request_soft_reset_await_h2d_diagnostic_tc_completion_handler,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.abort_handler = scic_sds_request_started_state_abort_handler,
		.frame_handler = scic_sds_stp_request_soft_reset_await_d2h_frame_handler,
	}
};
1721 static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
1722 void *object)
1724 struct scic_sds_request *sci_req = object;
1726 SET_STATE_HANDLER(
1727 sci_req,
1728 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1729 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
1732 scic_sds_remote_device_set_working_request(
1733 sci_req->target_device, sci_req
/* Enter the soft-reset await-H2D-diagnostic substate: clear the SRST bit in
 * the H2D FIS and the TC control-frame bit, then continue the IO so the
 * de-asserting FIS is transmitted. */
static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = scic_stp_io_request_get_h2d_reg_address(sci_req);
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_stp_request_started_soft_reset_substate_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
			);
	}
	/* NOTE(review): a continue_io failure is silently dropped here — the
	 * handlers are not installed and no error is reported; confirm the
	 * caller recovers from this. */
}
1764 static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
1765 void *object)
1767 struct scic_sds_request *sci_req = object;
1769 SET_STATE_HANDLER(
1770 sci_req,
1771 scic_sds_stp_request_started_soft_reset_substate_handler_table,
1772 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
/* State table for the soft-reset started-substate machine. */
static const struct sci_base_state scic_sds_stp_request_started_soft_reset_substate_table[] = {
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	}
};
/**
 * scic_sds_stp_soft_reset_request_construct() - construct a SATA soft-reset
 *    request.
 * @sci_req: the request to construct.
 *
 * Builds the raw STP task context and starts the soft-reset substate machine
 * in the await-H2D-asserted state. Returns SCI_SUCCESS.
 */
enum sci_status scic_sds_stp_soft_reset_request_construct(struct scic_sds_request *sci_req)
{
	struct scic_sds_stp_request *stp_req =
		container_of(sci_req, typeof(*stp_req), parent);

	scic_sds_stp_non_ncq_request_construct(sci_req);

	/* Build the STP task context structure */
	scu_stp_raw_request_construct_task_context(stp_req, sci_req->task_context_buffer);

	sci_base_state_machine_construct(&sci_req->started_substate_machine,
					 sci_req,
					 scic_sds_stp_request_started_soft_reset_substate_table,
					 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);

	return SCI_SUCCESS;
}