/*
 * Serial Attached SCSI (SAS) class SCSI Host glue.
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */
26 #include "sas_internal.h"
28 #include <scsi/scsi_host.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_tcq.h>
31 #include <scsi/scsi.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_transport.h>
34 #include <scsi/scsi_transport_sas.h>
35 #include "../scsi_sas_internal.h"
36 #include "../scsi_transport_api.h"
38 #include <linux/err.h>
39 #include <linux/blkdev.h>
40 #include <linux/scatterlist.h>
/* ---------- SCSI Host glue ---------- */

/* A sas_task is hung off a scsi_cmnd via the SCSI midlayer's opaque
 * host_scribble pointer; these two macros are the only accessors.
 */
#define TO_SAS_TASK(_scsi_cmd)  ((void *)(_scsi_cmd)->host_scribble)
#define ASSIGN_SAS_TASK(_sc, _t) do { (_sc)->host_scribble = (void *) _t; } while (0)
47 static void sas_scsi_task_done(struct sas_task
*task
)
49 struct task_status_struct
*ts
= &task
->task_status
;
50 struct scsi_cmnd
*sc
= task
->uldd_task
;
51 struct sas_ha_struct
*sas_ha
= SHOST_TO_SAS_HA(sc
->device
->host
);
52 unsigned ts_flags
= task
->task_state_flags
;
56 SAS_DPRINTK("task_done called with non existing SCSI cmnd!\n");
57 list_del_init(&task
->list
);
62 if (ts
->resp
== SAS_TASK_UNDELIVERED
) {
65 } else { /* ts->resp == SAS_TASK_COMPLETE */
66 /* task delivered, what happened afterwards? */
68 case SAS_DEV_NO_RESPONSE
:
75 case SAS_DATA_UNDERRUN
:
76 sc
->resid
= ts
->residual
;
77 if (sc
->request_bufflen
- sc
->resid
< sc
->underflow
)
80 case SAS_DATA_OVERRUN
:
84 hs
= DID_SOFT_ERROR
; /* retry */
86 case SAS_DEVICE_UNKNOWN
:
93 if (ts
->open_rej_reason
== SAS_OREJ_RSVD_RETRY
)
94 hs
= DID_SOFT_ERROR
; /* retry */
98 case SAS_PROTO_RESPONSE
:
99 SAS_DPRINTK("LLDD:%s sent SAS_PROTO_RESP for an SSP "
100 "task; please report this\n",
101 task
->dev
->port
->ha
->sas_ha_name
);
103 case SAS_ABORTED_TASK
:
107 memcpy(sc
->sense_buffer
, ts
->buf
,
108 max(SCSI_SENSE_BUFFERSIZE
, ts
->buf_valid_size
));
109 stat
= SAM_CHECK_COND
;
116 ASSIGN_SAS_TASK(sc
, NULL
);
117 sc
->result
= (hs
<< 16) | stat
;
118 list_del_init(&task
->list
);
120 /* This is very ugly but this is how SCSI Core works. */
121 if (ts_flags
& SAS_TASK_STATE_ABORTED
)
122 scsi_eh_finish_cmd(sc
, &sas_ha
->eh_done_q
);
127 static enum task_attribute
sas_scsi_get_task_attr(struct scsi_cmnd
*cmd
)
129 enum task_attribute ta
= TASK_ATTR_SIMPLE
;
130 if (cmd
->request
&& blk_rq_tagged(cmd
->request
)) {
131 if (cmd
->device
->ordered_tags
&&
132 (cmd
->request
->cmd_flags
& REQ_HARDBARRIER
))
138 static struct sas_task
*sas_create_task(struct scsi_cmnd
*cmd
,
139 struct domain_device
*dev
,
142 struct sas_task
*task
= sas_alloc_task(gfp_flags
);
148 *(u32
*)cmd
->sense_buffer
= 0;
149 task
->uldd_task
= cmd
;
150 ASSIGN_SAS_TASK(cmd
, task
);
153 task
->task_proto
= task
->dev
->tproto
; /* BUG_ON(!SSP) */
155 task
->ssp_task
.retry_count
= 1;
156 int_to_scsilun(cmd
->device
->lun
, &lun
);
157 memcpy(task
->ssp_task
.LUN
, &lun
.scsi_lun
, 8);
158 task
->ssp_task
.task_attr
= sas_scsi_get_task_attr(cmd
);
159 memcpy(task
->ssp_task
.cdb
, cmd
->cmnd
, 16);
161 task
->scatter
= cmd
->request_buffer
;
162 task
->num_scatter
= cmd
->use_sg
;
163 task
->total_xfer_len
= cmd
->request_bufflen
;
164 task
->data_dir
= cmd
->sc_data_direction
;
166 task
->task_done
= sas_scsi_task_done
;
171 static int sas_queue_up(struct sas_task
*task
)
173 struct sas_ha_struct
*sas_ha
= task
->dev
->port
->ha
;
174 struct scsi_core
*core
= &sas_ha
->core
;
178 spin_lock_irqsave(&core
->task_queue_lock
, flags
);
179 if (sas_ha
->lldd_queue_size
< core
->task_queue_size
+ 1) {
180 spin_unlock_irqrestore(&core
->task_queue_lock
, flags
);
181 return -SAS_QUEUE_FULL
;
183 list_add_tail(&task
->list
, &core
->task_queue
);
184 core
->task_queue_size
+= 1;
185 spin_unlock_irqrestore(&core
->task_queue_lock
, flags
);
186 up(&core
->queue_thread_sema
);
192 * sas_queuecommand -- Enqueue a command for processing
193 * @parameters: See SCSI Core documentation
195 * Note: XXX: Remove the host unlock/lock pair when SCSI Core can
196 * call us without holding an IRQ spinlock...
198 int sas_queuecommand(struct scsi_cmnd
*cmd
,
199 void (*scsi_done
)(struct scsi_cmnd
*))
202 struct domain_device
*dev
= cmd_to_domain_dev(cmd
);
203 struct Scsi_Host
*host
= cmd
->device
->host
;
204 struct sas_internal
*i
= to_sas_internal(host
->transportt
);
206 spin_unlock_irq(host
->host_lock
);
209 struct sas_ha_struct
*sas_ha
= dev
->port
->ha
;
210 struct sas_task
*task
;
213 task
= sas_create_task(cmd
, dev
, GFP_ATOMIC
);
217 cmd
->scsi_done
= scsi_done
;
218 /* Queue up, Direct Mode or Task Collector Mode. */
219 if (sas_ha
->lldd_max_execute_num
< 2)
220 res
= i
->dft
->lldd_execute_task(task
, 1, GFP_ATOMIC
);
222 res
= sas_queue_up(task
);
226 SAS_DPRINTK("lldd_execute_task returned: %d\n", res
);
227 ASSIGN_SAS_TASK(cmd
, NULL
);
229 if (res
== -SAS_QUEUE_FULL
) {
230 cmd
->result
= DID_SOFT_ERROR
<< 16; /* retry */
238 spin_lock_irq(host
->host_lock
);
242 static void sas_scsi_clear_queue_lu(struct list_head
*error_q
, struct scsi_cmnd
*my_cmd
)
244 struct scsi_cmnd
*cmd
, *n
;
246 list_for_each_entry_safe(cmd
, n
, error_q
, eh_entry
) {
248 list_del_init(&cmd
->eh_entry
);
252 static void sas_scsi_clear_queue_I_T(struct list_head
*error_q
,
253 struct domain_device
*dev
)
255 struct scsi_cmnd
*cmd
, *n
;
257 list_for_each_entry_safe(cmd
, n
, error_q
, eh_entry
) {
258 struct domain_device
*x
= cmd_to_domain_dev(cmd
);
261 list_del_init(&cmd
->eh_entry
);
265 static void sas_scsi_clear_queue_port(struct list_head
*error_q
,
266 struct asd_sas_port
*port
)
268 struct scsi_cmnd
*cmd
, *n
;
270 list_for_each_entry_safe(cmd
, n
, error_q
, eh_entry
) {
271 struct domain_device
*dev
= cmd_to_domain_dev(cmd
);
272 struct asd_sas_port
*x
= dev
->port
;
275 list_del_init(&cmd
->eh_entry
);
/* Outcomes of sas_scsi_find_task(); the error handler escalates
 * recovery based on this (done -> aborted -> LU reset -> I_T reset).
 * Enumerators reconstructed from their uses in this file.
 */
enum task_disposition {
	TASK_IS_DONE,
	TASK_IS_ABORTED,
	TASK_IS_AT_LU,
	TASK_IS_NOT_AT_LU,
};
286 static enum task_disposition
sas_scsi_find_task(struct sas_task
*task
)
288 struct sas_ha_struct
*ha
= task
->dev
->port
->ha
;
291 struct sas_internal
*si
=
292 to_sas_internal(task
->dev
->port
->ha
->core
.shost
->transportt
);
294 if (ha
->lldd_max_execute_num
> 1) {
295 struct scsi_core
*core
= &ha
->core
;
296 struct sas_task
*t
, *n
;
298 spin_lock_irqsave(&core
->task_queue_lock
, flags
);
299 list_for_each_entry_safe(t
, n
, &core
->task_queue
, list
) {
301 list_del_init(&t
->list
);
302 spin_unlock_irqrestore(&core
->task_queue_lock
,
304 SAS_DPRINTK("%s: task 0x%p aborted from "
307 return TASK_IS_ABORTED
;
310 spin_unlock_irqrestore(&core
->task_queue_lock
, flags
);
313 for (i
= 0; i
< 5; i
++) {
314 SAS_DPRINTK("%s: aborting task 0x%p\n", __FUNCTION__
, task
);
315 res
= si
->dft
->lldd_abort_task(task
);
317 spin_lock_irqsave(&task
->task_state_lock
, flags
);
318 if (task
->task_state_flags
& SAS_TASK_STATE_DONE
) {
319 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
320 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__
,
324 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
326 if (res
== TMF_RESP_FUNC_COMPLETE
) {
327 SAS_DPRINTK("%s: task 0x%p is aborted\n",
329 return TASK_IS_ABORTED
;
330 } else if (si
->dft
->lldd_query_task
) {
331 SAS_DPRINTK("%s: querying task 0x%p\n",
333 res
= si
->dft
->lldd_query_task(task
);
334 if (res
== TMF_RESP_FUNC_SUCC
) {
335 SAS_DPRINTK("%s: task 0x%p at LU\n",
337 return TASK_IS_AT_LU
;
338 } else if (res
== TMF_RESP_FUNC_COMPLETE
) {
339 SAS_DPRINTK("%s: task 0x%p not at LU\n",
341 return TASK_IS_NOT_AT_LU
;
348 static int sas_recover_lu(struct domain_device
*dev
, struct scsi_cmnd
*cmd
)
350 int res
= TMF_RESP_FUNC_FAILED
;
352 struct sas_internal
*i
=
353 to_sas_internal(dev
->port
->ha
->core
.shost
->transportt
);
355 int_to_scsilun(cmd
->device
->lun
, &lun
);
357 SAS_DPRINTK("eh: device %llx LUN %x has the task\n",
358 SAS_ADDR(dev
->sas_addr
),
361 if (i
->dft
->lldd_abort_task_set
)
362 res
= i
->dft
->lldd_abort_task_set(dev
, lun
.scsi_lun
);
364 if (res
== TMF_RESP_FUNC_FAILED
) {
365 if (i
->dft
->lldd_clear_task_set
)
366 res
= i
->dft
->lldd_clear_task_set(dev
, lun
.scsi_lun
);
369 if (res
== TMF_RESP_FUNC_FAILED
) {
370 if (i
->dft
->lldd_lu_reset
)
371 res
= i
->dft
->lldd_lu_reset(dev
, lun
.scsi_lun
);
377 static int sas_recover_I_T(struct domain_device
*dev
)
379 int res
= TMF_RESP_FUNC_FAILED
;
380 struct sas_internal
*i
=
381 to_sas_internal(dev
->port
->ha
->core
.shost
->transportt
);
383 SAS_DPRINTK("I_T nexus reset for dev %016llx\n",
384 SAS_ADDR(dev
->sas_addr
));
386 if (i
->dft
->lldd_I_T_nexus_reset
)
387 res
= i
->dft
->lldd_I_T_nexus_reset(dev
);
392 static int eh_reset_phy_helper(struct sas_phy
*phy
)
396 tmf_resp
= sas_phy_reset(phy
, 1);
398 SAS_DPRINTK("Hard reset of phy %d failed 0x%x\n",
399 phy
->identify
.phy_identifier
,
405 void sas_scsi_recover_host(struct Scsi_Host
*shost
)
407 struct sas_ha_struct
*ha
= SHOST_TO_SAS_HA(shost
);
410 struct scsi_cmnd
*cmd
, *n
;
411 enum task_disposition res
= TASK_IS_DONE
;
412 int tmf_resp
, need_reset
;
413 struct sas_internal
*i
= to_sas_internal(shost
->transportt
);
414 struct sas_phy
*task_sas_phy
= NULL
;
416 spin_lock_irqsave(shost
->host_lock
, flags
);
417 list_splice_init(&shost
->eh_cmd_q
, &error_q
);
418 spin_unlock_irqrestore(shost
->host_lock
, flags
);
420 SAS_DPRINTK("Enter %s\n", __FUNCTION__
);
422 /* All tasks on this list were marked SAS_TASK_STATE_ABORTED
423 * by sas_scsi_timed_out() callback.
426 SAS_DPRINTK("going over list...\n");
427 list_for_each_entry_safe(cmd
, n
, &error_q
, eh_entry
) {
428 struct sas_task
*task
= TO_SAS_TASK(cmd
);
429 list_del_init(&cmd
->eh_entry
);
432 SAS_DPRINTK("%s: taskless cmd?!\n", __FUNCTION__
);
436 spin_lock_irqsave(&task
->task_state_lock
, flags
);
437 need_reset
= task
->task_state_flags
& SAS_TASK_NEED_DEV_RESET
;
439 task_sas_phy
= task
->dev
->port
->phy
;
440 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
442 SAS_DPRINTK("trying to find task 0x%p\n", task
);
443 res
= sas_scsi_find_task(task
);
449 SAS_DPRINTK("%s: task 0x%p is done\n", __FUNCTION__
,
451 task
->task_done(task
);
453 eh_reset_phy_helper(task_sas_phy
);
455 case TASK_IS_ABORTED
:
456 SAS_DPRINTK("%s: task 0x%p is aborted\n",
458 task
->task_done(task
);
460 eh_reset_phy_helper(task_sas_phy
);
463 SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task
);
464 tmf_resp
= sas_recover_lu(task
->dev
, cmd
);
465 if (tmf_resp
== TMF_RESP_FUNC_COMPLETE
) {
466 SAS_DPRINTK("dev %016llx LU %x is "
470 task
->task_done(task
);
472 eh_reset_phy_helper(task_sas_phy
);
473 sas_scsi_clear_queue_lu(&error_q
, cmd
);
477 case TASK_IS_NOT_AT_LU
:
478 SAS_DPRINTK("task 0x%p is not at LU: I_T recover\n",
480 tmf_resp
= sas_recover_I_T(task
->dev
);
481 if (tmf_resp
== TMF_RESP_FUNC_COMPLETE
) {
482 SAS_DPRINTK("I_T %016llx recovered\n",
483 SAS_ADDR(task
->dev
->sas_addr
));
484 task
->task_done(task
);
486 eh_reset_phy_helper(task_sas_phy
);
487 sas_scsi_clear_queue_I_T(&error_q
, task
->dev
);
490 /* Hammer time :-) */
491 if (i
->dft
->lldd_clear_nexus_port
) {
492 struct asd_sas_port
*port
= task
->dev
->port
;
493 SAS_DPRINTK("clearing nexus for port:%d\n",
495 res
= i
->dft
->lldd_clear_nexus_port(port
);
496 if (res
== TMF_RESP_FUNC_COMPLETE
) {
497 SAS_DPRINTK("clear nexus port:%d "
498 "succeeded\n", port
->id
);
499 task
->task_done(task
);
501 eh_reset_phy_helper(task_sas_phy
);
502 sas_scsi_clear_queue_port(&error_q
,
507 if (i
->dft
->lldd_clear_nexus_ha
) {
508 SAS_DPRINTK("clear nexus ha\n");
509 res
= i
->dft
->lldd_clear_nexus_ha(ha
);
510 if (res
== TMF_RESP_FUNC_COMPLETE
) {
511 SAS_DPRINTK("clear nexus ha "
513 task
->task_done(task
);
515 eh_reset_phy_helper(task_sas_phy
);
519 /* If we are here -- this means that no amount
520 * of effort could recover from errors. Quite
521 * possibly the HA just disappeared.
523 SAS_DPRINTK("error from device %llx, LUN %x "
524 "couldn't be recovered in any way\n",
525 SAS_ADDR(task
->dev
->sas_addr
),
528 task
->task_done(task
);
530 eh_reset_phy_helper(task_sas_phy
);
535 scsi_eh_flush_done_q(&ha
->eh_done_q
);
536 SAS_DPRINTK("--- Exit %s\n", __FUNCTION__
);
539 SAS_DPRINTK("--- Exit %s -- clear_q\n", __FUNCTION__
);
540 list_for_each_entry_safe(cmd
, n
, &error_q
, eh_entry
) {
541 struct sas_task
*task
= TO_SAS_TASK(cmd
);
542 list_del_init(&cmd
->eh_entry
);
543 task
->task_done(task
);
547 enum scsi_eh_timer_return
sas_scsi_timed_out(struct scsi_cmnd
*cmd
)
549 struct sas_task
*task
= TO_SAS_TASK(cmd
);
553 cmd
->timeout_per_command
/= 2;
554 SAS_DPRINTK("command 0x%p, task 0x%p, gone: %s\n",
555 cmd
, task
, (cmd
->timeout_per_command
?
556 "EH_RESET_TIMER" : "EH_NOT_HANDLED"));
557 if (!cmd
->timeout_per_command
)
558 return EH_NOT_HANDLED
;
559 return EH_RESET_TIMER
;
562 spin_lock_irqsave(&task
->task_state_lock
, flags
);
563 BUG_ON(task
->task_state_flags
& SAS_TASK_STATE_ABORTED
);
564 if (task
->task_state_flags
& SAS_TASK_STATE_DONE
) {
565 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
566 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_HANDLED\n",
570 if (!(task
->task_state_flags
& SAS_TASK_AT_INITIATOR
)) {
571 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
572 SAS_DPRINTK("command 0x%p, task 0x%p, not at initiator: "
575 return EH_RESET_TIMER
;
577 task
->task_state_flags
|= SAS_TASK_STATE_ABORTED
;
578 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
580 SAS_DPRINTK("command 0x%p, task 0x%p, timed out: EH_NOT_HANDLED\n",
583 return EH_NOT_HANDLED
;
586 struct domain_device
*sas_find_dev_by_rphy(struct sas_rphy
*rphy
)
588 struct Scsi_Host
*shost
= dev_to_shost(rphy
->dev
.parent
);
589 struct sas_ha_struct
*ha
= SHOST_TO_SAS_HA(shost
);
590 struct domain_device
*found_dev
= NULL
;
593 spin_lock(&ha
->phy_port_lock
);
594 for (i
= 0; i
< ha
->num_phys
; i
++) {
595 struct asd_sas_port
*port
= ha
->sas_port
[i
];
596 struct domain_device
*dev
;
598 spin_lock(&port
->dev_list_lock
);
599 list_for_each_entry(dev
, &port
->dev_list
, dev_list_node
) {
600 if (rphy
== dev
->rphy
) {
602 spin_unlock(&port
->dev_list_lock
);
606 spin_unlock(&port
->dev_list_lock
);
609 spin_unlock(&ha
->phy_port_lock
);
614 static inline struct domain_device
*sas_find_target(struct scsi_target
*starget
)
616 struct sas_rphy
*rphy
= dev_to_rphy(starget
->dev
.parent
);
618 return sas_find_dev_by_rphy(rphy
);
621 int sas_target_alloc(struct scsi_target
*starget
)
623 struct domain_device
*found_dev
= sas_find_target(starget
);
628 starget
->hostdata
= found_dev
;
/* Default and maximum per-device queue depths. */
#define SAS_DEF_QD 32
#define SAS_MAX_QD 64
635 int sas_slave_configure(struct scsi_device
*scsi_dev
)
637 struct domain_device
*dev
= sdev_to_domain_dev(scsi_dev
);
638 struct sas_ha_struct
*sas_ha
;
640 BUG_ON(dev
->rphy
->identify
.device_type
!= SAS_END_DEVICE
);
642 sas_ha
= dev
->port
->ha
;
644 sas_read_port_mode_page(scsi_dev
);
646 if (scsi_dev
->tagged_supported
) {
647 scsi_set_tag_type(scsi_dev
, MSG_SIMPLE_TAG
);
648 scsi_activate_tcq(scsi_dev
, SAS_DEF_QD
);
650 SAS_DPRINTK("device %llx, LUN %x doesn't support "
651 "TCQ\n", SAS_ADDR(dev
->sas_addr
),
653 scsi_dev
->tagged_supported
= 0;
654 scsi_set_tag_type(scsi_dev
, 0);
655 scsi_deactivate_tcq(scsi_dev
, 1);
/* SCSI host template ->slave_destroy: nothing to tear down per-device. */
void sas_slave_destroy(struct scsi_device *scsi_dev)
{
}
665 int sas_change_queue_depth(struct scsi_device
*scsi_dev
, int new_depth
)
667 int res
= min(new_depth
, SAS_MAX_QD
);
669 if (scsi_dev
->tagged_supported
)
670 scsi_adjust_queue_depth(scsi_dev
, scsi_get_tag_type(scsi_dev
),
673 struct domain_device
*dev
= sdev_to_domain_dev(scsi_dev
);
674 sas_printk("device %llx LUN %x queue depth changed to 1\n",
675 SAS_ADDR(dev
->sas_addr
),
677 scsi_adjust_queue_depth(scsi_dev
, 0, 1);
684 int sas_change_queue_type(struct scsi_device
*scsi_dev
, int qt
)
686 if (!scsi_dev
->tagged_supported
)
689 scsi_deactivate_tcq(scsi_dev
, 1);
691 scsi_set_tag_type(scsi_dev
, qt
);
692 scsi_activate_tcq(scsi_dev
, scsi_dev
->queue_depth
);
697 int sas_bios_param(struct scsi_device
*scsi_dev
,
698 struct block_device
*bdev
,
699 sector_t capacity
, int *hsc
)
703 sector_div(capacity
, 255*63);
709 /* ---------- Task Collector Thread implementation ---------- */
711 static void sas_queue(struct sas_ha_struct
*sas_ha
)
713 struct scsi_core
*core
= &sas_ha
->core
;
718 struct sas_internal
*i
= to_sas_internal(core
->shost
->transportt
);
720 spin_lock_irqsave(&core
->task_queue_lock
, flags
);
721 while (!core
->queue_thread_kill
&&
722 !list_empty(&core
->task_queue
)) {
724 can_queue
= sas_ha
->lldd_queue_size
- core
->task_queue_size
;
725 if (can_queue
>= 0) {
726 can_queue
= core
->task_queue_size
;
727 list_splice_init(&core
->task_queue
, &q
);
729 struct list_head
*a
, *n
;
731 can_queue
= sas_ha
->lldd_queue_size
;
732 list_for_each_safe(a
, n
, &core
->task_queue
) {
733 list_move_tail(a
, &q
);
734 if (--can_queue
== 0)
737 can_queue
= sas_ha
->lldd_queue_size
;
739 core
->task_queue_size
-= can_queue
;
740 spin_unlock_irqrestore(&core
->task_queue_lock
, flags
);
742 struct sas_task
*task
= list_entry(q
.next
,
746 res
= i
->dft
->lldd_execute_task(task
, can_queue
,
749 __list_add(&q
, task
->list
.prev
, &task
->list
);
751 spin_lock_irqsave(&core
->task_queue_lock
, flags
);
753 list_splice_init(&q
, &core
->task_queue
); /*at head*/
754 core
->task_queue_size
+= can_queue
;
757 spin_unlock_irqrestore(&core
->task_queue_lock
, flags
);
/* Handshake for collector-thread startup and shutdown. */
static DECLARE_COMPLETION(queue_th_comp);
763 * sas_queue_thread -- The Task Collector thread
764 * @_sas_ha: pointer to struct sas_ha
766 static int sas_queue_thread(void *_sas_ha
)
768 struct sas_ha_struct
*sas_ha
= _sas_ha
;
769 struct scsi_core
*core
= &sas_ha
->core
;
771 daemonize("sas_queue_%d", core
->shost
->host_no
);
772 current
->flags
|= PF_NOFREEZE
;
774 complete(&queue_th_comp
);
777 down_interruptible(&core
->queue_thread_sema
);
779 if (core
->queue_thread_kill
)
783 complete(&queue_th_comp
);
788 int sas_init_queue(struct sas_ha_struct
*sas_ha
)
791 struct scsi_core
*core
= &sas_ha
->core
;
793 spin_lock_init(&core
->task_queue_lock
);
794 core
->task_queue_size
= 0;
795 INIT_LIST_HEAD(&core
->task_queue
);
796 init_MUTEX_LOCKED(&core
->queue_thread_sema
);
798 res
= kernel_thread(sas_queue_thread
, sas_ha
, 0);
800 wait_for_completion(&queue_th_comp
);
802 return res
< 0 ? res
: 0;
805 void sas_shutdown_queue(struct sas_ha_struct
*sas_ha
)
808 struct scsi_core
*core
= &sas_ha
->core
;
809 struct sas_task
*task
, *n
;
811 init_completion(&queue_th_comp
);
812 core
->queue_thread_kill
= 1;
813 up(&core
->queue_thread_sema
);
814 wait_for_completion(&queue_th_comp
);
816 if (!list_empty(&core
->task_queue
))
817 SAS_DPRINTK("HA: %llx: scsi core task queue is NOT empty!?\n",
818 SAS_ADDR(sas_ha
->sas_addr
));
820 spin_lock_irqsave(&core
->task_queue_lock
, flags
);
821 list_for_each_entry_safe(task
, n
, &core
->task_queue
, list
) {
822 struct scsi_cmnd
*cmd
= task
->uldd_task
;
824 list_del_init(&task
->list
);
826 ASSIGN_SAS_TASK(cmd
, NULL
);
828 cmd
->result
= DID_ABORT
<< 16;
831 spin_unlock_irqrestore(&core
->task_queue_lock
, flags
);
835 * Call the LLDD task abort routine directly. This function is intended for
836 * use by upper layers that need to tell the LLDD to abort a task.
838 int __sas_task_abort(struct sas_task
*task
)
840 struct sas_internal
*si
=
841 to_sas_internal(task
->dev
->port
->ha
->core
.shost
->transportt
);
845 spin_lock_irqsave(&task
->task_state_lock
, flags
);
846 if (task
->task_state_flags
& SAS_TASK_STATE_ABORTED
||
847 task
->task_state_flags
& SAS_TASK_STATE_DONE
) {
848 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
849 SAS_DPRINTK("%s: Task %p already finished.\n", __FUNCTION__
,
853 task
->task_state_flags
|= SAS_TASK_STATE_ABORTED
;
854 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
856 if (!si
->dft
->lldd_abort_task
)
859 res
= si
->dft
->lldd_abort_task(task
);
861 spin_lock_irqsave(&task
->task_state_lock
, flags
);
862 if ((task
->task_state_flags
& SAS_TASK_STATE_DONE
) ||
863 (res
== TMF_RESP_FUNC_COMPLETE
))
865 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
866 task
->task_done(task
);
870 if (!(task
->task_state_flags
& SAS_TASK_STATE_DONE
))
871 task
->task_state_flags
&= ~SAS_TASK_STATE_ABORTED
;
872 spin_unlock_irqrestore(&task
->task_state_lock
, flags
);
878 * Tell an upper layer that it needs to initiate an abort for a given task.
879 * This should only ever be called by an LLDD.
881 void sas_task_abort(struct sas_task
*task
)
883 struct scsi_cmnd
*sc
= task
->uldd_task
;
885 /* Escape for libsas internal commands */
887 if (!del_timer(&task
->timer
))
889 task
->timer
.function(task
->timer
.data
);
893 scsi_req_abort_cmd(sc
);
894 scsi_schedule_eh(sc
->device
->host
);
EXPORT_SYMBOL_GPL(sas_queuecommand);
EXPORT_SYMBOL_GPL(sas_target_alloc);
EXPORT_SYMBOL_GPL(sas_slave_configure);
EXPORT_SYMBOL_GPL(sas_slave_destroy);
EXPORT_SYMBOL_GPL(sas_change_queue_depth);
EXPORT_SYMBOL_GPL(sas_change_queue_type);
EXPORT_SYMBOL_GPL(sas_bios_param);
EXPORT_SYMBOL_GPL(__sas_task_abort);
EXPORT_SYMBOL_GPL(sas_task_abort);
EXPORT_SYMBOL_GPL(sas_phy_reset);
EXPORT_SYMBOL_GPL(sas_phy_enable);