GUI: Fix Tomato RAF theme for all builds. Compilation typo.
[tomato.git] / release / src-rt-6.x.4708 / linux / linux-2.6.36 / drivers / scsi / aic94xx / aic94xx_tmf.c
blob95ff6119eb94b36bdfe9617d1d26178da7585790
/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc.  All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
27 #include <linux/spinlock.h>
28 #include <linux/gfp.h>
29 #include "aic94xx.h"
30 #include "aic94xx_sas.h"
31 #include "aic94xx_hwi.h"
33 /* ---------- Internal enqueue ---------- */
35 static int asd_enqueue_internal(struct asd_ascb *ascb,
36 void (*tasklet_complete)(struct asd_ascb *,
37 struct done_list_struct *),
38 void (*timed_out)(unsigned long))
40 int res;
42 ascb->tasklet_complete = tasklet_complete;
43 ascb->uldd_timer = 1;
45 ascb->timer.data = (unsigned long) ascb;
46 ascb->timer.function = timed_out;
47 ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
49 add_timer(&ascb->timer);
51 res = asd_post_ascb_list(ascb->ha, ascb, 1);
52 if (unlikely(res))
53 del_timer(&ascb->timer);
54 return res;
57 /* ---------- CLEAR NEXUS ---------- */
59 struct tasklet_completion_status {
60 int dl_opcode;
61 int tmf_state;
62 u8 tag_valid:1;
63 __be16 tag;
/* Declare and zero-initialize a tasklet_completion_status on the stack. */
#define DECLARE_TCS(tcs)			\
	struct tasklet_completion_status tcs = {	\
		.dl_opcode = 0,			\
		.tmf_state = 0,			\
		.tag_valid = 0,			\
		.tag = 0,			\
	}
75 static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
76 struct done_list_struct *dl)
78 struct tasklet_completion_status *tcs = ascb->uldd_task;
79 ASD_DPRINTK("%s: here\n", __func__);
80 if (!del_timer(&ascb->timer)) {
81 ASD_DPRINTK("%s: couldn't delete timer\n", __func__);
82 return;
84 ASD_DPRINTK("%s: opcode: 0x%x\n", __func__, dl->opcode);
85 tcs->dl_opcode = dl->opcode;
86 complete(ascb->completion);
87 asd_ascb_free(ascb);
90 static void asd_clear_nexus_timedout(unsigned long data)
92 struct asd_ascb *ascb = (void *)data;
93 struct tasklet_completion_status *tcs = ascb->uldd_task;
95 ASD_DPRINTK("%s: here\n", __func__);
96 tcs->dl_opcode = TMF_RESP_FUNC_FAILED;
97 complete(ascb->completion);
/*
 * Common prologue for the asd_clear_nexus_*() family: allocates an ascb,
 * wires up the on-stack completion and status, and sets the CLEAR_NEXUS
 * opcode.  Expects `asd_ha` in scope; the caller then fills in the
 * scb->clear_nexus fields and finishes with CLEAR_NEXUS_POST.
 */
#define CLEAR_NEXUS_PRE		\
	struct asd_ascb *ascb;	\
	struct scb *scb;	\
	int res;		\
	DECLARE_COMPLETION_ONSTACK(completion);	\
	DECLARE_TCS(tcs);	\
				\
	ASD_DPRINTK("%s: PRE\n", __func__);	\
	res = 1;		\
	ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
	if (!ascb)		\
		return -ENOMEM;	\
				\
	ascb->completion = &completion;	\
	ascb->uldd_task = &tcs;	\
	scb = ascb->scb;	\
	scb->header.opcode = CLEAR_NEXUS
/*
 * Common epilogue for asd_clear_nexus_*(): posts the SCB with a timeout,
 * sleeps until either the tasklet or the timeout completes it, and maps
 * TC_NO_ERROR to TMF_RESP_FUNC_COMPLETE.  Frees the ascb only on the
 * enqueue-failure path -- on success the completion tasklet frees it.
 */
#define CLEAR_NEXUS_POST	\
	ASD_DPRINTK("%s: POST\n", __func__);	\
	res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
				   asd_clear_nexus_timedout);	\
	if (res)		\
		goto out_err;	\
	ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __func__); \
	wait_for_completion(&completion);	\
	res = tcs.dl_opcode;	\
	if (res == TC_NO_ERROR)	\
		res = TMF_RESP_FUNC_COMPLETE;	\
	return res;		\
out_err:			\
	asd_ascb_free(ascb);	\
	return res
134 int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
136 struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
138 CLEAR_NEXUS_PRE;
139 scb->clear_nexus.nexus = NEXUS_ADAPTER;
140 CLEAR_NEXUS_POST;
143 int asd_clear_nexus_port(struct asd_sas_port *port)
145 struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
147 CLEAR_NEXUS_PRE;
148 scb->clear_nexus.nexus = NEXUS_PORT;
149 scb->clear_nexus.conn_mask = port->phy_mask;
150 CLEAR_NEXUS_POST;
/* Phases of an I_T nexus reset (see asd_I_T_nexus_reset()). */
enum clear_nexus_phase {
	NEXUS_PHASE_PRE,	/* before the phy reset: suspend TX, flush exec queue */
	NEXUS_PHASE_POST,	/* after the phy reset: flush send queue */
	NEXUS_PHASE_RESUME,	/* resume transmission */
};
159 static int asd_clear_nexus_I_T(struct domain_device *dev,
160 enum clear_nexus_phase phase)
162 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
164 CLEAR_NEXUS_PRE;
165 scb->clear_nexus.nexus = NEXUS_I_T;
166 switch (phase) {
167 case NEXUS_PHASE_PRE:
168 scb->clear_nexus.flags = EXEC_Q | SUSPEND_TX;
169 break;
170 case NEXUS_PHASE_POST:
171 scb->clear_nexus.flags = SEND_Q | NOTINQ;
172 break;
173 case NEXUS_PHASE_RESUME:
174 scb->clear_nexus.flags = RESUME_TX;
176 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
177 dev->lldd_dev);
178 CLEAR_NEXUS_POST;
181 int asd_I_T_nexus_reset(struct domain_device *dev)
183 int res, tmp_res, i;
184 struct sas_phy *phy = sas_find_local_phy(dev);
185 /* Standard mandates link reset for ATA (type 0) and
186 * hard reset for SSP (type 1) */
187 int reset_type = (dev->dev_type == SATA_DEV ||
188 (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
190 asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE);
191 /* send a hard reset */
192 ASD_DPRINTK("sending %s reset to %s\n",
193 reset_type ? "hard" : "soft", dev_name(&phy->dev));
194 res = sas_phy_reset(phy, reset_type);
195 if (res == TMF_RESP_FUNC_COMPLETE) {
196 /* wait for the maximum settle time */
197 msleep(500);
198 /* clear all outstanding commands (keep nexus suspended) */
199 asd_clear_nexus_I_T(dev, NEXUS_PHASE_POST);
201 for (i = 0 ; i < 3; i++) {
202 tmp_res = asd_clear_nexus_I_T(dev, NEXUS_PHASE_RESUME);
203 if (tmp_res == TC_RESUME)
204 return res;
205 msleep(500);
208 /* This is a bit of a problem: the sequencer is still suspended
209 * and is refusing to resume. Hope it will resume on a bigger hammer
210 * or the disk is lost */
211 dev_printk(KERN_ERR, &phy->dev,
212 "Failed to resume nexus after reset 0x%x\n", tmp_res);
214 return TMF_RESP_FUNC_FAILED;
217 static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
219 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
221 CLEAR_NEXUS_PRE;
222 scb->clear_nexus.nexus = NEXUS_I_T_L;
223 scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
224 memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
225 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
226 dev->lldd_dev);
227 CLEAR_NEXUS_POST;
230 static int asd_clear_nexus_tag(struct sas_task *task)
232 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
233 struct asd_ascb *tascb = task->lldd_task;
235 CLEAR_NEXUS_PRE;
236 scb->clear_nexus.nexus = NEXUS_TAG;
237 memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
238 scb->clear_nexus.ssp_task.tag = tascb->tag;
239 if (task->dev->tproto)
240 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
241 task->dev->lldd_dev);
242 CLEAR_NEXUS_POST;
245 static int asd_clear_nexus_index(struct sas_task *task)
247 struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
248 struct asd_ascb *tascb = task->lldd_task;
250 CLEAR_NEXUS_PRE;
251 scb->clear_nexus.nexus = NEXUS_TRANS_CX;
252 if (task->dev->tproto)
253 scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
254 task->dev->lldd_dev);
255 scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
256 CLEAR_NEXUS_POST;
259 /* ---------- TMFs ---------- */
261 static void asd_tmf_timedout(unsigned long data)
263 struct asd_ascb *ascb = (void *) data;
264 struct tasklet_completion_status *tcs = ascb->uldd_task;
266 ASD_DPRINTK("tmf timed out\n");
267 tcs->tmf_state = TMF_RESP_FUNC_FAILED;
268 complete(ascb->completion);
271 static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
272 struct done_list_struct *dl)
274 struct asd_ha_struct *asd_ha = ascb->ha;
275 unsigned long flags;
276 struct tc_resp_sb_struct {
277 __le16 index_escb;
278 u8 len_lsb;
279 u8 flags;
280 } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;
282 int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
283 struct asd_ascb *escb;
284 struct asd_dma_tok *edb;
285 struct ssp_frame_hdr *fh;
286 struct ssp_response_iu *ru;
287 int res = TMF_RESP_FUNC_FAILED;
289 ASD_DPRINTK("tmf resp tasklet\n");
291 spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
292 escb = asd_tc_index_find(&asd_ha->seq,
293 (int)le16_to_cpu(resp_sb->index_escb));
294 spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);
296 if (!escb) {
297 ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
298 return res;
301 edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
302 ascb->tag = *(__be16 *)(edb->vaddr+4);
303 fh = edb->vaddr + 16;
304 ru = edb->vaddr + 16 + sizeof(*fh);
305 res = ru->status;
306 if (ru->datapres == 1) /* Response data present */
307 res = ru->resp_data[3];
308 ascb->tag_valid = 1;
310 asd_invalidate_edb(escb, edb_id);
311 return res;
314 static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
315 struct done_list_struct *dl)
317 struct tasklet_completion_status *tcs;
319 if (!del_timer(&ascb->timer))
320 return;
322 tcs = ascb->uldd_task;
323 ASD_DPRINTK("tmf tasklet complete\n");
325 tcs->dl_opcode = dl->opcode;
327 if (dl->opcode == TC_SSP_RESP) {
328 tcs->tmf_state = asd_get_tmf_resp_tasklet(ascb, dl);
329 tcs->tag_valid = ascb->tag_valid;
330 tcs->tag = ascb->tag;
333 complete(ascb->completion);
334 asd_ascb_free(ascb);
337 static int asd_clear_nexus(struct sas_task *task)
339 int res = TMF_RESP_FUNC_FAILED;
340 int leftover;
341 struct asd_ascb *tascb = task->lldd_task;
342 DECLARE_COMPLETION_ONSTACK(completion);
343 unsigned long flags;
345 tascb->completion = &completion;
347 ASD_DPRINTK("task not done, clearing nexus\n");
348 if (tascb->tag_valid)
349 res = asd_clear_nexus_tag(task);
350 else
351 res = asd_clear_nexus_index(task);
352 leftover = wait_for_completion_timeout(&completion,
353 AIC94XX_SCB_TIMEOUT);
354 tascb->completion = NULL;
355 ASD_DPRINTK("came back from clear nexus\n");
356 spin_lock_irqsave(&task->task_state_lock, flags);
357 if (leftover < 1)
358 res = TMF_RESP_FUNC_FAILED;
359 if (task->task_state_flags & SAS_TASK_STATE_DONE)
360 res = TMF_RESP_FUNC_COMPLETE;
361 spin_unlock_irqrestore(&task->task_state_lock, flags);
363 return res;
367 * asd_abort_task -- ABORT TASK TMF
368 * @task: the task to be aborted
370 * Before calling ABORT TASK the task state flags should be ORed with
371 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
372 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
374 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
375 * Returns: SAS TMF responses (see sas_task.h),
376 * -ENOMEM,
377 * -SAS_QUEUE_FULL.
379 * When ABORT TASK returns, the caller of ABORT TASK checks first the
380 * task->task_state_flags, and then the return value of ABORT TASK.
382 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
383 * task was completed successfully prior to it being aborted. The
384 * caller of ABORT TASK has responsibility to call task->task_done()
385 * xor free the task, depending on their framework. The return code
386 * is TMF_RESP_FUNC_FAILED in this case.
388 * Else the SAS_TASK_STATE_DONE bit is not set,
389 * If the return code is TMF_RESP_FUNC_COMPLETE, then
390 * the task was aborted successfully. The caller of
391 * ABORT TASK has responsibility to call task->task_done()
392 * to finish the task, xor free the task depending on their
393 * framework.
394 * else
395 * the ABORT TASK returned some kind of error. The task
396 * was _not_ cancelled. Nothing can be assumed.
397 * The caller of ABORT TASK may wish to retry.
399 int asd_abort_task(struct sas_task *task)
401 struct asd_ascb *tascb = task->lldd_task;
402 struct asd_ha_struct *asd_ha = tascb->ha;
403 int res = 1;
404 unsigned long flags;
405 struct asd_ascb *ascb = NULL;
406 struct scb *scb;
407 int leftover;
408 DECLARE_TCS(tcs);
409 DECLARE_COMPLETION_ONSTACK(completion);
410 DECLARE_COMPLETION_ONSTACK(tascb_completion);
412 tascb->completion = &tascb_completion;
414 spin_lock_irqsave(&task->task_state_lock, flags);
415 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
416 spin_unlock_irqrestore(&task->task_state_lock, flags);
417 res = TMF_RESP_FUNC_COMPLETE;
418 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
419 goto out_done;
421 spin_unlock_irqrestore(&task->task_state_lock, flags);
423 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
424 if (!ascb)
425 return -ENOMEM;
427 ascb->uldd_task = &tcs;
428 ascb->completion = &completion;
429 scb = ascb->scb;
430 scb->header.opcode = SCB_ABORT_TASK;
432 switch (task->task_proto) {
433 case SAS_PROTOCOL_SATA:
434 case SAS_PROTOCOL_STP:
435 scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
436 break;
437 case SAS_PROTOCOL_SSP:
438 scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
439 scb->abort_task.proto_conn_rate |= task->dev->linkrate;
440 break;
441 case SAS_PROTOCOL_SMP:
442 break;
443 default:
444 break;
447 if (task->task_proto == SAS_PROTOCOL_SSP) {
448 scb->abort_task.ssp_frame.frame_type = SSP_TASK;
449 memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
450 task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
451 memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
452 task->dev->port->ha->hashed_sas_addr,
453 HASHED_SAS_ADDR_SIZE);
454 scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);
456 memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
457 scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
458 scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
461 scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
462 scb->abort_task.conn_handle = cpu_to_le16(
463 (u16)(unsigned long)task->dev->lldd_dev);
464 scb->abort_task.retry_count = 1;
465 scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
466 scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
468 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
469 asd_tmf_timedout);
470 if (res)
471 goto out_free;
472 wait_for_completion(&completion);
473 ASD_DPRINTK("tmf came back\n");
475 tascb->tag = tcs.tag;
476 tascb->tag_valid = tcs.tag_valid;
478 spin_lock_irqsave(&task->task_state_lock, flags);
479 if (task->task_state_flags & SAS_TASK_STATE_DONE) {
480 spin_unlock_irqrestore(&task->task_state_lock, flags);
481 res = TMF_RESP_FUNC_COMPLETE;
482 ASD_DPRINTK("%s: task 0x%p done\n", __func__, task);
483 goto out_done;
485 spin_unlock_irqrestore(&task->task_state_lock, flags);
487 if (tcs.dl_opcode == TC_SSP_RESP) {
488 /* The task to be aborted has been sent to the device.
489 * We got a Response IU for the ABORT TASK TMF. */
490 if (tcs.tmf_state == TMF_RESP_FUNC_COMPLETE)
491 res = asd_clear_nexus(task);
492 else
493 res = tcs.tmf_state;
494 } else if (tcs.dl_opcode == TC_NO_ERROR &&
495 tcs.tmf_state == TMF_RESP_FUNC_FAILED) {
496 /* timeout */
497 res = TMF_RESP_FUNC_FAILED;
498 } else {
499 /* In the following we assume that the managing layer
500 * will _never_ make a mistake, when issuing ABORT
501 * TASK.
503 switch (tcs.dl_opcode) {
504 default:
505 res = asd_clear_nexus(task);
506 /* fallthrough */
507 case TC_NO_ERROR:
508 break;
509 /* The task hasn't been sent to the device xor
510 * we never got a (sane) Response IU for the
511 * ABORT TASK TMF.
513 case TF_NAK_RECV:
514 res = TMF_RESP_INVALID_FRAME;
515 break;
516 case TF_TMF_TASK_DONE: /* done but not reported yet */
517 res = TMF_RESP_FUNC_FAILED;
518 leftover =
519 wait_for_completion_timeout(&tascb_completion,
520 AIC94XX_SCB_TIMEOUT);
521 spin_lock_irqsave(&task->task_state_lock, flags);
522 if (leftover < 1)
523 res = TMF_RESP_FUNC_FAILED;
524 if (task->task_state_flags & SAS_TASK_STATE_DONE)
525 res = TMF_RESP_FUNC_COMPLETE;
526 spin_unlock_irqrestore(&task->task_state_lock, flags);
527 break;
528 case TF_TMF_NO_TAG:
529 case TF_TMF_TAG_FREE: /* the tag is in the free list */
530 case TF_TMF_NO_CONN_HANDLE: /* no such device */
531 res = TMF_RESP_FUNC_COMPLETE;
532 break;
533 case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
534 res = TMF_RESP_FUNC_ESUPP;
535 break;
538 out_done:
539 tascb->completion = NULL;
540 if (res == TMF_RESP_FUNC_COMPLETE) {
541 task->lldd_task = NULL;
542 mb();
543 asd_ascb_free(tascb);
545 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
546 return res;
548 out_free:
549 asd_ascb_free(ascb);
550 ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
551 return res;
555 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
556 * @dev: pointer to struct domain_device of interest
557 * @lun: pointer to u8[8] which is the LUN
558 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
559 * @index: the transaction context of the task to be queried if QT TMF
561 * This function is used to send ABORT TASK SET, CLEAR ACA,
562 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
564 * No SCBs should be queued to the I_T_L nexus when this SCB is
565 * pending.
567 * Returns: TMF response code (see sas_task.h or the SAS spec)
569 static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
570 int tmf, int index)
572 struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
573 struct asd_ascb *ascb;
574 int res = 1;
575 struct scb *scb;
576 DECLARE_COMPLETION_ONSTACK(completion);
577 DECLARE_TCS(tcs);
579 if (!(dev->tproto & SAS_PROTOCOL_SSP))
580 return TMF_RESP_FUNC_ESUPP;
582 ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
583 if (!ascb)
584 return -ENOMEM;
586 ascb->completion = &completion;
587 ascb->uldd_task = &tcs;
588 scb = ascb->scb;
590 if (tmf == TMF_QUERY_TASK)
591 scb->header.opcode = QUERY_SSP_TASK;
592 else
593 scb->header.opcode = INITIATE_SSP_TMF;
595 scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
596 scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
597 /* SSP frame header */
598 scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
599 memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
600 dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
601 memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
602 dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
603 scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
604 /* SSP Task IU */
605 memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
606 scb->ssp_tmf.ssp_task.tmf = tmf;
608 scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
609 scb->ssp_tmf.conn_handle= cpu_to_le16((u16)(unsigned long)
610 dev->lldd_dev);
611 scb->ssp_tmf.retry_count = 1;
612 scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
613 if (tmf == TMF_QUERY_TASK)
614 scb->ssp_tmf.index = cpu_to_le16(index);
616 res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
617 asd_tmf_timedout);
618 if (res)
619 goto out_err;
620 wait_for_completion(&completion);
622 switch (tcs.dl_opcode) {
623 case TC_NO_ERROR:
624 res = TMF_RESP_FUNC_COMPLETE;
625 break;
626 case TF_NAK_RECV:
627 res = TMF_RESP_INVALID_FRAME;
628 break;
629 case TF_TMF_TASK_DONE:
630 res = TMF_RESP_FUNC_FAILED;
631 break;
632 case TF_TMF_NO_TAG:
633 case TF_TMF_TAG_FREE: /* the tag is in the free list */
634 case TF_TMF_NO_CONN_HANDLE: /* no such device */
635 res = TMF_RESP_FUNC_COMPLETE;
636 break;
637 case TF_TMF_NO_CTX: /* not in seq, or proto != SSP */
638 res = TMF_RESP_FUNC_ESUPP;
639 break;
640 default:
641 /* Allow TMF response codes to propagate upwards */
642 res = tcs.dl_opcode;
643 break;
645 return res;
646 out_err:
647 asd_ascb_free(ascb);
648 return res;
651 int asd_abort_task_set(struct domain_device *dev, u8 *lun)
653 int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);
655 if (res == TMF_RESP_FUNC_COMPLETE)
656 asd_clear_nexus_I_T_L(dev, lun);
657 return res;
660 int asd_clear_aca(struct domain_device *dev, u8 *lun)
662 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);
664 if (res == TMF_RESP_FUNC_COMPLETE)
665 asd_clear_nexus_I_T_L(dev, lun);
666 return res;
669 int asd_clear_task_set(struct domain_device *dev, u8 *lun)
671 int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);
673 if (res == TMF_RESP_FUNC_COMPLETE)
674 asd_clear_nexus_I_T_L(dev, lun);
675 return res;
678 int asd_lu_reset(struct domain_device *dev, u8 *lun)
680 int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);
682 if (res == TMF_RESP_FUNC_COMPLETE)
683 asd_clear_nexus_I_T_L(dev, lun);
684 return res;
688 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
689 * task: pointer to sas_task struct of interest
691 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
692 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
694 * Normally the management layer sets the task to aborted state,
695 * and then calls query task and then abort task.
697 int asd_query_task(struct sas_task *task)
699 struct asd_ascb *ascb = task->lldd_task;
700 int index;
702 if (ascb) {
703 index = ascb->tc_index;
704 return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
705 TMF_QUERY_TASK, index);
707 return TMF_RESP_FUNC_COMPLETE;