drivers/scsi/libiscsi.c
1 /*
2 * iSCSI lib functions
4 * Copyright (C) 2006 Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2004 - 2006 Mike Christie
6 * Copyright (C) 2004 - 2005 Dmitry Yusupov
7 * Copyright (C) 2004 - 2005 Alex Aizman
8 * maintained by open-iscsi@googlegroups.com
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
24 #include <linux/types.h>
25 #include <linux/kfifo.h>
26 #include <linux/delay.h>
27 #include <linux/log2.h>
28 #include <asm/unaligned.h>
29 #include <net/tcp.h>
30 #include <scsi/scsi_cmnd.h>
31 #include <scsi/scsi_device.h>
32 #include <scsi/scsi_eh.h>
33 #include <scsi/scsi_tcq.h>
34 #include <scsi/scsi_host.h>
35 #include <scsi/scsi.h>
36 #include <scsi/iscsi_proto.h>
37 #include <scsi/scsi_transport.h>
38 #include <scsi/scsi_transport_iscsi.h>
39 #include <scsi/libiscsi.h>
41 struct iscsi_session *
42 class_to_transport_session(struct iscsi_cls_session *cls_session)
44 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
45 return iscsi_hostdata(shost->hostdata);
47 EXPORT_SYMBOL_GPL(class_to_transport_session);
49 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
50 #define SNA32_CHECK 2147483648UL
52 static int iscsi_sna_lt(u32 n1, u32 n2)
54 return n1 != n2 && ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
55 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
58 /* Serial Number Arithmetic, 32 bits, less than, RFC1982 */
59 static int iscsi_sna_lte(u32 n1, u32 n2)
61 return n1 == n2 || ((n1 < n2 && (n2 - n1 < SNA32_CHECK)) ||
62 (n1 > n2 && (n2 - n1 < SNA32_CHECK)));
65 void
66 iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
68 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
69 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
72 * standard specifies this check for when to update expected and
73 * max sequence numbers
75 if (iscsi_sna_lt(max_cmdsn, exp_cmdsn - 1))
76 return;
78 if (exp_cmdsn != session->exp_cmdsn &&
79 !iscsi_sna_lt(exp_cmdsn, session->exp_cmdsn))
80 session->exp_cmdsn = exp_cmdsn;
82 if (max_cmdsn != session->max_cmdsn &&
83 !iscsi_sna_lt(max_cmdsn, session->max_cmdsn)) {
84 session->max_cmdsn = max_cmdsn;
86 * if the window closed with IO queued, then kick the
87 * xmit thread
89 if (!list_empty(&session->leadconn->xmitqueue) ||
90 !list_empty(&session->leadconn->mgmtqueue))
91 scsi_queue_work(session->host,
92 &session->leadconn->xmitwork);
95 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
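/*
 * Example of the window update above (hypothetical numbers): suppose the
 * session currently has exp_cmdsn = 10 and max_cmdsn = 12, i.e. a send
 * window of max_cmdsn - exp_cmdsn + 1 = 3 commands.  A response carrying
 * ExpCmdSN = 11 and MaxCmdSN = 20 advances both values; the iscsi_sna_lt()
 * checks only let the window move forward, and an inconsistent pair with
 * MaxCmdSN < ExpCmdSN - 1 is ignored entirely.  If commands or mgmt PDUs
 * were queued while the window was closed, the xmit work is kicked so
 * they can go out now that MaxCmdSN has grown.
 */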
97 void iscsi_prep_unsolicit_data_pdu(struct iscsi_cmd_task *ctask,
98 struct iscsi_data *hdr)
100 struct iscsi_conn *conn = ctask->conn;
102 memset(hdr, 0, sizeof(struct iscsi_data));
103 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
104 hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
105 ctask->unsol_datasn++;
106 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
107 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
109 hdr->itt = ctask->hdr->itt;
110 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
111 hdr->offset = cpu_to_be32(ctask->unsol_offset);
113 if (ctask->unsol_count > conn->max_xmit_dlength) {
114 hton24(hdr->dlength, conn->max_xmit_dlength);
115 ctask->data_count = conn->max_xmit_dlength;
116 ctask->unsol_offset += ctask->data_count;
117 hdr->flags = 0;
118 } else {
119 hton24(hdr->dlength, ctask->unsol_count);
120 ctask->data_count = ctask->unsol_count;
121 hdr->flags = ISCSI_FLAG_CMD_FINAL;
124 EXPORT_SYMBOL_GPL(iscsi_prep_unsolicit_data_pdu);
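/*
 * Example of how the unsolicited Data-Out sequence above is chunked
 * (hypothetical sizes, and assuming no immediate data was sent so
 * unsol_offset starts at 0, with the transport decrementing unsol_count
 * by data_count after each PDU it sends): with unsol_count = 24k and
 * max_xmit_dlength = 8k this prepares three Data-Out PDUs of 8k each,
 * at offsets 0, 8k and 16k, with increasing DataSN; only the last one
 * (unsol_count <= max_xmit_dlength) carries ISCSI_FLAG_CMD_FINAL.
 */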
126 static int iscsi_add_hdr(struct iscsi_cmd_task *ctask, unsigned len)
128 unsigned exp_len = ctask->hdr_len + len;
130 if (exp_len > ctask->hdr_max) {
131 WARN_ON(1);
132 return -EINVAL;
135 WARN_ON(len & (ISCSI_PAD_LEN - 1)); /* caller must pad the AHS */
136 ctask->hdr_len = exp_len;
137 return 0;
141 * make an extended cdb AHS
143 static int iscsi_prep_ecdb_ahs(struct iscsi_cmd_task *ctask)
145 struct scsi_cmnd *cmd = ctask->sc;
146 unsigned rlen, pad_len;
147 unsigned short ahslength;
148 struct iscsi_ecdb_ahdr *ecdb_ahdr;
149 int rc;
151 ecdb_ahdr = iscsi_next_hdr(ctask);
152 rlen = cmd->cmd_len - ISCSI_CDB_SIZE;
154 BUG_ON(rlen > sizeof(ecdb_ahdr->ecdb));
155 ahslength = rlen + sizeof(ecdb_ahdr->reserved);
157 pad_len = iscsi_padding(rlen);
159 rc = iscsi_add_hdr(ctask, sizeof(ecdb_ahdr->ahslength) +
160 sizeof(ecdb_ahdr->ahstype) + ahslength + pad_len);
161 if (rc)
162 return rc;
164 if (pad_len)
165 memset(&ecdb_ahdr->ecdb[rlen], 0, pad_len);
167 ecdb_ahdr->ahslength = cpu_to_be16(ahslength);
168 ecdb_ahdr->ahstype = ISCSI_AHSTYPE_CDB;
169 ecdb_ahdr->reserved = 0;
170 memcpy(ecdb_ahdr->ecdb, cmd->cmnd + ISCSI_CDB_SIZE, rlen);
172 debug_scsi("iscsi_prep_ecdb_ahs: varlen_cdb_len %d "
173 "rlen %d pad_len %d ahs_length %d iscsi_headers_size %u\n",
174 cmd->cmd_len, rlen, pad_len, ahslength, ctask->hdr_len);
176 return 0;
179 static int iscsi_prep_bidi_ahs(struct iscsi_cmd_task *ctask)
181 struct scsi_cmnd *sc = ctask->sc;
182 struct iscsi_rlength_ahdr *rlen_ahdr;
183 int rc;
185 rlen_ahdr = iscsi_next_hdr(ctask);
186 rc = iscsi_add_hdr(ctask, sizeof(*rlen_ahdr));
187 if (rc)
188 return rc;
190 rlen_ahdr->ahslength =
191 cpu_to_be16(sizeof(rlen_ahdr->read_length) +
192 sizeof(rlen_ahdr->reserved));
193 rlen_ahdr->ahstype = ISCSI_AHSTYPE_RLENGTH;
194 rlen_ahdr->reserved = 0;
195 rlen_ahdr->read_length = cpu_to_be32(scsi_in(sc)->length);
197 debug_scsi("bidi-in rlen_ahdr->read_length(%d) "
198 "rlen_ahdr->ahslength(%d)\n",
199 be32_to_cpu(rlen_ahdr->read_length),
200 be16_to_cpu(rlen_ahdr->ahslength));
201 return 0;
205 * iscsi_prep_scsi_cmd_pdu - prep iscsi scsi cmd pdu
206 * @ctask: iscsi cmd task
208 * Prep basic iSCSI PDU fields for a scsi cmd pdu. The LLD should set
209 * fields like dlength or final based on how much data it sends
211 static int iscsi_prep_scsi_cmd_pdu(struct iscsi_cmd_task *ctask)
213 struct iscsi_conn *conn = ctask->conn;
214 struct iscsi_session *session = conn->session;
215 struct iscsi_cmd *hdr = ctask->hdr;
216 struct scsi_cmnd *sc = ctask->sc;
217 unsigned hdrlength, cmd_len;
218 int rc;
220 ctask->hdr_len = 0;
221 rc = iscsi_add_hdr(ctask, sizeof(*hdr));
222 if (rc)
223 return rc;
224 hdr->opcode = ISCSI_OP_SCSI_CMD;
225 hdr->flags = ISCSI_ATTR_SIMPLE;
226 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
227 hdr->itt = build_itt(ctask->itt, session->age);
228 hdr->cmdsn = cpu_to_be32(session->cmdsn);
229 session->cmdsn++;
230 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
231 cmd_len = sc->cmd_len;
232 if (cmd_len < ISCSI_CDB_SIZE)
233 memset(&hdr->cdb[cmd_len], 0, ISCSI_CDB_SIZE - cmd_len);
234 else if (cmd_len > ISCSI_CDB_SIZE) {
235 rc = iscsi_prep_ecdb_ahs(ctask);
236 if (rc)
237 return rc;
238 cmd_len = ISCSI_CDB_SIZE;
240 memcpy(hdr->cdb, sc->cmnd, cmd_len);
242 ctask->imm_count = 0;
243 if (scsi_bidi_cmnd(sc)) {
244 hdr->flags |= ISCSI_FLAG_CMD_READ;
245 rc = iscsi_prep_bidi_ahs(ctask);
246 if (rc)
247 return rc;
249 if (sc->sc_data_direction == DMA_TO_DEVICE) {
250 unsigned out_len = scsi_out(sc)->length;
251 hdr->data_length = cpu_to_be32(out_len);
252 hdr->flags |= ISCSI_FLAG_CMD_WRITE;
254 * Write counters:
256 * imm_count bytes to be sent right after
257 * SCSI PDU Header
259 * unsol_count bytes(as Data-Out) to be sent
260 * without R2T ack right after
261 * immediate data
263 * r2t_data_count bytes to be sent via R2T ack's
265 * pad_count bytes to be sent as zero-padding
267 ctask->unsol_count = 0;
268 ctask->unsol_offset = 0;
269 ctask->unsol_datasn = 0;
271 if (session->imm_data_en) {
272 if (out_len >= session->first_burst)
273 ctask->imm_count = min(session->first_burst,
274 conn->max_xmit_dlength);
275 else
276 ctask->imm_count = min(out_len,
277 conn->max_xmit_dlength);
278 hton24(hdr->dlength, ctask->imm_count);
279 } else
280 zero_data(hdr->dlength);
282 if (!session->initial_r2t_en) {
283 ctask->unsol_count = min(session->first_burst, out_len)
284 - ctask->imm_count;
285 ctask->unsol_offset = ctask->imm_count;
288 if (!ctask->unsol_count)
289 /* No unsolicited Data-Outs */
290 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
291 } else {
292 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
293 zero_data(hdr->dlength);
294 hdr->data_length = cpu_to_be32(scsi_in(sc)->length);
296 if (sc->sc_data_direction == DMA_FROM_DEVICE)
297 hdr->flags |= ISCSI_FLAG_CMD_READ;
300 /* calculate size of additional header segments (AHSs) */
301 hdrlength = ctask->hdr_len - sizeof(*hdr);
303 WARN_ON(hdrlength & (ISCSI_PAD_LEN-1));
304 hdrlength /= ISCSI_PAD_LEN;
306 WARN_ON(hdrlength >= 256);
307 hdr->hlength = hdrlength & 0xFF;
309 if (conn->session->tt->init_cmd_task(conn->ctask))
310 return -EIO;
312 conn->scsicmd_pdus_cnt++;
313 debug_scsi("iscsi prep [%s cid %d sc %p cdb 0x%x itt 0x%x "
314 "len %d bidi_len %d cmdsn %d win %d]\n",
315 scsi_bidi_cmnd(sc) ? "bidirectional" :
316 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
317 conn->id, sc, sc->cmnd[0], ctask->itt,
318 scsi_bufflen(sc), scsi_bidi_cmnd(sc) ? scsi_in(sc)->length : 0,
319 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
320 return 0;
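/*
 * Worked example of the write counters set up above (hypothetical
 * negotiated values): for a 64k WRITE with ImmediateData=Yes,
 * InitialR2T=No, first_burst = 16k and max_xmit_dlength = 8k:
 *
 *	imm_count    = min(first_burst, max_xmit_dlength)    = 8k
 *	unsol_count  = min(first_burst, out_len) - imm_count = 8k
 *	unsol_offset = imm_count                             = 8k
 *
 * so 8k rides in the command PDU as immediate data, another 8k goes out
 * as unsolicited Data-Out, and the remaining 48k waits for R2Ts from the
 * target.  Because unsol_count is non-zero the F bit is left clear here
 * and set by iscsi_prep_unsolicit_data_pdu() on the last unsolicited PDU.
 */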
324 * iscsi_complete_command - return command back to scsi-ml
325 * @ctask: iscsi cmd task
327 * Must be called with session lock.
328 * This function returns the scsi command to scsi-ml and returns
329 * the cmd task to the pool of available cmd tasks.
331 static void iscsi_complete_command(struct iscsi_cmd_task *ctask)
333 struct iscsi_conn *conn = ctask->conn;
334 struct iscsi_session *session = conn->session;
335 struct scsi_cmnd *sc = ctask->sc;
337 ctask->state = ISCSI_TASK_COMPLETED;
338 ctask->sc = NULL;
339 /* SCSI eh reuses commands to verify us */
340 sc->SCp.ptr = NULL;
341 if (conn->ctask == ctask)
342 conn->ctask = NULL;
343 list_del_init(&ctask->running);
344 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
345 sc->scsi_done(sc);
348 static void __iscsi_get_ctask(struct iscsi_cmd_task *ctask)
350 atomic_inc(&ctask->refcount);
353 static void __iscsi_put_ctask(struct iscsi_cmd_task *ctask)
355 if (atomic_dec_and_test(&ctask->refcount))
356 iscsi_complete_command(ctask);
360 * session lock must be held
362 static void fail_command(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
363 int err)
365 struct scsi_cmnd *sc;
367 sc = ctask->sc;
368 if (!sc)
369 return;
371 if (ctask->state == ISCSI_TASK_PENDING)
373 * cmd never made it to the xmit thread, so we should not count
374 * the cmd in the sequencing
376 conn->session->queued_cmdsn--;
377 else
378 conn->session->tt->cleanup_cmd_task(conn, ctask);
380 sc->result = err;
381 if (!scsi_bidi_cmnd(sc))
382 scsi_set_resid(sc, scsi_bufflen(sc));
383 else {
384 scsi_out(sc)->resid = scsi_out(sc)->length;
385 scsi_in(sc)->resid = scsi_in(sc)->length;
387 if (conn->ctask == ctask)
388 conn->ctask = NULL;
389 /* release ref from queuecommand */
390 __iscsi_put_ctask(ctask);
394 * iscsi_free_mgmt_task - return mgmt task back to pool
395 * @conn: iscsi connection
396 * @mtask: mtask
398 * Must be called with session lock.
400 void iscsi_free_mgmt_task(struct iscsi_conn *conn,
401 struct iscsi_mgmt_task *mtask)
403 list_del_init(&mtask->running);
404 if (conn->login_mtask == mtask)
405 return;
407 if (conn->ping_mtask == mtask)
408 conn->ping_mtask = NULL;
409 __kfifo_put(conn->session->mgmtpool.queue,
410 (void*)&mtask, sizeof(void*));
412 EXPORT_SYMBOL_GPL(iscsi_free_mgmt_task);
414 static struct iscsi_mgmt_task *
415 __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
416 char *data, uint32_t data_size)
418 struct iscsi_session *session = conn->session;
419 struct iscsi_mgmt_task *mtask;
421 if (session->state == ISCSI_STATE_TERMINATE)
422 return NULL;
424 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
425 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
427 * Login and Text are sent serially, in
428 * request-followed-by-response sequence.
429 * Same mtask can be used. Same ITT must be used.
430 * Note that login_mtask is preallocated at conn_create().
432 mtask = conn->login_mtask;
433 else {
434 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
435 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
437 if (!__kfifo_get(session->mgmtpool.queue,
438 (void*)&mtask, sizeof(void*)))
439 return NULL;
442 if (data_size) {
443 memcpy(mtask->data, data, data_size);
444 mtask->data_count = data_size;
445 } else
446 mtask->data_count = 0;
448 memcpy(mtask->hdr, hdr, sizeof(struct iscsi_hdr));
449 INIT_LIST_HEAD(&mtask->running);
450 list_add_tail(&mtask->running, &conn->mgmtqueue);
451 return mtask;
454 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
455 char *data, uint32_t data_size)
457 struct iscsi_conn *conn = cls_conn->dd_data;
458 struct iscsi_session *session = conn->session;
459 int err = 0;
461 spin_lock_bh(&session->lock);
462 if (!__iscsi_conn_send_pdu(conn, hdr, data, data_size))
463 err = -EPERM;
464 spin_unlock_bh(&session->lock);
465 scsi_queue_work(session->host, &conn->xmitwork);
466 return err;
468 EXPORT_SYMBOL_GPL(iscsi_conn_send_pdu);
471 * iscsi_cmd_rsp - SCSI Command Response processing
472 * @conn: iscsi connection
473 * @hdr: iscsi header
474 * @ctask: scsi command task
475 * @data: cmd data buffer
476 * @datalen: len of buffer
478 * iscsi_cmd_rsp sets up the scsi_cmnd fields based on the PDU and
479 * then completes the command and task.
481 static void iscsi_scsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
482 struct iscsi_cmd_task *ctask, char *data,
483 int datalen)
485 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)hdr;
486 struct iscsi_session *session = conn->session;
487 struct scsi_cmnd *sc = ctask->sc;
489 iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
490 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
492 sc->result = (DID_OK << 16) | rhdr->cmd_status;
494 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
495 sc->result = DID_ERROR << 16;
496 goto out;
499 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION) {
500 uint16_t senselen;
502 if (datalen < 2) {
503 invalid_datalen:
504 iscsi_conn_printk(KERN_ERR, conn,
505 "Got CHECK_CONDITION but invalid data "
506 "buffer size of %d\n", datalen);
507 sc->result = DID_BAD_TARGET << 16;
508 goto out;
511 senselen = be16_to_cpu(get_unaligned((__be16 *) data));
512 if (datalen < senselen)
513 goto invalid_datalen;
515 memcpy(sc->sense_buffer, data + 2,
516 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
517 debug_scsi("copied %d bytes of sense\n",
518 min_t(uint16_t, senselen, SCSI_SENSE_BUFFERSIZE));
521 if (rhdr->flags & (ISCSI_FLAG_CMD_BIDI_UNDERFLOW |
522 ISCSI_FLAG_CMD_BIDI_OVERFLOW)) {
523 int res_count = be32_to_cpu(rhdr->bi_residual_count);
525 if (scsi_bidi_cmnd(sc) && res_count > 0 &&
526 (rhdr->flags & ISCSI_FLAG_CMD_BIDI_OVERFLOW ||
527 res_count <= scsi_in(sc)->length))
528 scsi_in(sc)->resid = res_count;
529 else
530 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
533 if (rhdr->flags & (ISCSI_FLAG_CMD_UNDERFLOW |
534 ISCSI_FLAG_CMD_OVERFLOW)) {
535 int res_count = be32_to_cpu(rhdr->residual_count);
537 if (res_count > 0 &&
538 (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW ||
539 res_count <= scsi_bufflen(sc)))
540 /* write side for bidi or uni-io set_resid */
541 scsi_set_resid(sc, res_count);
542 else
543 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
545 out:
546 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
547 (long)sc, sc->result, ctask->itt);
548 conn->scsirsp_pdus_cnt++;
550 __iscsi_put_ctask(ctask);
553 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
555 struct iscsi_tm_rsp *tmf = (struct iscsi_tm_rsp *)hdr;
557 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
558 conn->tmfrsp_pdus_cnt++;
560 if (conn->tmf_state != TMF_QUEUED)
561 return;
563 if (tmf->response == ISCSI_TMF_RSP_COMPLETE)
564 conn->tmf_state = TMF_SUCCESS;
565 else if (tmf->response == ISCSI_TMF_RSP_NO_TASK)
566 conn->tmf_state = TMF_NOT_FOUND;
567 else
568 conn->tmf_state = TMF_FAILED;
569 wake_up(&conn->ehwait);
572 static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr)
574 struct iscsi_nopout hdr;
575 struct iscsi_mgmt_task *mtask;
577 if (!rhdr && conn->ping_mtask)
578 return;
580 memset(&hdr, 0, sizeof(struct iscsi_nopout));
581 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
582 hdr.flags = ISCSI_FLAG_CMD_FINAL;
584 if (rhdr) {
585 memcpy(hdr.lun, rhdr->lun, 8);
586 hdr.ttt = rhdr->ttt;
587 hdr.itt = RESERVED_ITT;
588 } else
589 hdr.ttt = RESERVED_ITT;
591 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
592 if (!mtask) {
593 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n");
594 return;
597 /* only track our nops */
598 if (!rhdr) {
599 conn->ping_mtask = mtask;
600 conn->last_ping = jiffies;
602 scsi_queue_work(conn->session->host, &conn->xmitwork);
605 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
606 char *data, int datalen)
608 struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
609 struct iscsi_hdr rejected_pdu;
610 uint32_t itt;
612 conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
614 if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
615 if (ntoh24(reject->dlength) > datalen)
616 return ISCSI_ERR_PROTO;
618 if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
619 memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
620 itt = get_itt(rejected_pdu.itt);
621 iscsi_conn_printk(KERN_ERR, conn,
622 "itt 0x%x had pdu (op 0x%x) rejected "
623 "due to DataDigest error.\n", itt,
624 rejected_pdu.opcode);
627 return 0;
631 * __iscsi_complete_pdu - complete pdu
632 * @conn: iscsi conn
633 * @hdr: iscsi header
634 * @data: data buffer
635 * @datalen: len of data buffer
637 * Completes pdu processing by freeing any resources allocated at
638 * queuecommand or generic pdu send time. The session lock must be held
639 * and iscsi_verify_itt() must have been called.
641 static int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
642 char *data, int datalen)
644 struct iscsi_session *session = conn->session;
645 int opcode = hdr->opcode & ISCSI_OPCODE_MASK, rc = 0;
646 struct iscsi_cmd_task *ctask;
647 struct iscsi_mgmt_task *mtask;
648 uint32_t itt;
650 conn->last_recv = jiffies;
651 if (hdr->itt != RESERVED_ITT)
652 itt = get_itt(hdr->itt);
653 else
654 itt = ~0U;
656 if (itt < session->cmds_max) {
657 ctask = session->cmds[itt];
659 debug_scsi("cmdrsp [op 0x%x cid %d itt 0x%x len %d]\n",
660 opcode, conn->id, ctask->itt, datalen);
662 switch(opcode) {
663 case ISCSI_OP_SCSI_CMD_RSP:
664 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
665 iscsi_scsi_cmd_rsp(conn, hdr, ctask, data,
666 datalen);
667 break;
668 case ISCSI_OP_SCSI_DATA_IN:
669 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
670 if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
671 conn->scsirsp_pdus_cnt++;
672 __iscsi_put_ctask(ctask);
674 break;
675 case ISCSI_OP_R2T:
676 /* LLD handles this for now */
677 break;
678 default:
679 rc = ISCSI_ERR_BAD_OPCODE;
680 break;
682 } else if (itt >= ISCSI_MGMT_ITT_OFFSET &&
683 itt < ISCSI_MGMT_ITT_OFFSET + session->mgmtpool_max) {
684 mtask = session->mgmt_cmds[itt - ISCSI_MGMT_ITT_OFFSET];
686 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
687 opcode, conn->id, mtask->itt, datalen);
689 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
690 switch(opcode) {
691 case ISCSI_OP_LOGOUT_RSP:
692 if (datalen) {
693 rc = ISCSI_ERR_PROTO;
694 break;
696 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
697 /* fall through */
698 case ISCSI_OP_LOGIN_RSP:
699 case ISCSI_OP_TEXT_RSP:
701 * login related PDU's exp_statsn is handled in
702 * userspace
704 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
705 rc = ISCSI_ERR_CONN_FAILED;
706 iscsi_free_mgmt_task(conn, mtask);
707 break;
708 case ISCSI_OP_SCSI_TMFUNC_RSP:
709 if (datalen) {
710 rc = ISCSI_ERR_PROTO;
711 break;
714 iscsi_tmf_rsp(conn, hdr);
715 iscsi_free_mgmt_task(conn, mtask);
716 break;
717 case ISCSI_OP_NOOP_IN:
718 if (hdr->ttt != cpu_to_be32(ISCSI_RESERVED_TAG) ||
719 datalen) {
720 rc = ISCSI_ERR_PROTO;
721 break;
723 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
725 if (conn->ping_mtask != mtask) {
727 * If this is not in response to one of our
728 * nops then it must be from userspace.
730 if (iscsi_recv_pdu(conn->cls_conn, hdr, data,
731 datalen))
732 rc = ISCSI_ERR_CONN_FAILED;
733 } else
734 mod_timer(&conn->transport_timer,
735 jiffies + conn->recv_timeout);
736 iscsi_free_mgmt_task(conn, mtask);
737 break;
738 default:
739 rc = ISCSI_ERR_BAD_OPCODE;
740 break;
742 } else if (itt == ~0U) {
743 iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
745 switch(opcode) {
746 case ISCSI_OP_NOOP_IN:
747 if (datalen) {
748 rc = ISCSI_ERR_PROTO;
749 break;
752 if (hdr->ttt == cpu_to_be32(ISCSI_RESERVED_TAG))
753 break;
755 iscsi_send_nopout(conn, (struct iscsi_nopin*)hdr);
756 break;
757 case ISCSI_OP_REJECT:
758 rc = iscsi_handle_reject(conn, hdr, data, datalen);
759 break;
760 case ISCSI_OP_ASYNC_EVENT:
761 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
762 if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
763 rc = ISCSI_ERR_CONN_FAILED;
764 break;
765 default:
766 rc = ISCSI_ERR_BAD_OPCODE;
767 break;
769 } else
770 rc = ISCSI_ERR_BAD_ITT;
772 return rc;
775 int iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
776 char *data, int datalen)
778 int rc;
780 spin_lock(&conn->session->lock);
781 rc = __iscsi_complete_pdu(conn, hdr, data, datalen);
782 spin_unlock(&conn->session->lock);
783 return rc;
785 EXPORT_SYMBOL_GPL(iscsi_complete_pdu);
787 /* verify itt (itt encoding: age+cid+itt) */
788 int iscsi_verify_itt(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
789 uint32_t *ret_itt)
791 struct iscsi_session *session = conn->session;
792 struct iscsi_cmd_task *ctask;
793 uint32_t itt;
795 if (hdr->itt != RESERVED_ITT) {
796 if (((__force u32)hdr->itt & ISCSI_AGE_MASK) !=
797 (session->age << ISCSI_AGE_SHIFT)) {
798 iscsi_conn_printk(KERN_ERR, conn,
799 "received itt %x expected session "
800 "age (%x)\n", (__force u32)hdr->itt,
801 session->age & ISCSI_AGE_MASK);
802 return ISCSI_ERR_BAD_ITT;
805 itt = get_itt(hdr->itt);
806 } else
807 itt = ~0U;
809 if (itt < session->cmds_max) {
810 ctask = session->cmds[itt];
812 if (!ctask->sc) {
813 iscsi_conn_printk(KERN_INFO, conn, "dropping ctask "
814 "with itt 0x%x\n", ctask->itt);
815 /* force drop */
816 return ISCSI_ERR_NO_SCSI_CMD;
819 if (ctask->sc->SCp.phase != session->age) {
820 iscsi_conn_printk(KERN_ERR, conn,
821 "iscsi: ctask's session age %d, "
822 "expected %d\n", ctask->sc->SCp.phase,
823 session->age);
824 return ISCSI_ERR_SESSION_FAILED;
828 *ret_itt = itt;
829 return 0;
831 EXPORT_SYMBOL_GPL(iscsi_verify_itt);
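/*
 * Sketch of the ITT encoding checked above (the ISCSI_AGE_* constants and
 * build_itt()/get_itt() live in the iscsi headers; the shift/mask values
 * below are assumptions for illustration only): the session age is packed
 * into the top bits of the 32-bit ITT and the task index into the low
 * bits, e.g. with an age field in bits 28-31:
 *
 *	build_itt(0x2a, 3)  -> 0x3000002a
 *	get_itt(0x3000002a) -> 0x2a
 *
 * so a response whose ITT carries a stale age (from a previous session
 * incarnation) is rejected here with ISCSI_ERR_BAD_ITT before the task
 * index is ever used to look up a ctask.
 */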
833 void iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
835 struct iscsi_session *session = conn->session;
836 unsigned long flags;
838 spin_lock_irqsave(&session->lock, flags);
839 if (session->state == ISCSI_STATE_FAILED) {
840 spin_unlock_irqrestore(&session->lock, flags);
841 return;
844 if (conn->stop_stage == 0)
845 session->state = ISCSI_STATE_FAILED;
846 spin_unlock_irqrestore(&session->lock, flags);
847 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
848 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
849 iscsi_conn_error(conn->cls_conn, err);
851 EXPORT_SYMBOL_GPL(iscsi_conn_failure);
853 static void iscsi_prep_mtask(struct iscsi_conn *conn,
854 struct iscsi_mgmt_task *mtask)
856 struct iscsi_session *session = conn->session;
857 struct iscsi_hdr *hdr = mtask->hdr;
858 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
860 if (hdr->opcode != (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) &&
861 hdr->opcode != (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
862 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
864 * pre-format CmdSN for outgoing PDU.
866 nop->cmdsn = cpu_to_be32(session->cmdsn);
867 if (hdr->itt != RESERVED_ITT) {
868 hdr->itt = build_itt(mtask->itt, session->age);
870 * TODO: We always use immediate, so we never hit this.
871 * If we start to send tmfs or nops as non-immediate then
872 * we should start checking the cmdsn numbers for mgmt tasks.
874 if (conn->c_stage == ISCSI_CONN_STARTED &&
875 !(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
876 session->queued_cmdsn++;
877 session->cmdsn++;
881 if (session->tt->init_mgmt_task)
882 session->tt->init_mgmt_task(conn, mtask);
884 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
885 hdr->opcode & ISCSI_OPCODE_MASK, hdr->itt,
886 mtask->data_count);
889 static int iscsi_xmit_mtask(struct iscsi_conn *conn)
891 struct iscsi_hdr *hdr = conn->mtask->hdr;
892 int rc;
894 if ((hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
895 conn->session->state = ISCSI_STATE_LOGGING_OUT;
896 spin_unlock_bh(&conn->session->lock);
898 rc = conn->session->tt->xmit_mgmt_task(conn, conn->mtask);
899 spin_lock_bh(&conn->session->lock);
900 if (rc)
901 return rc;
903 /* done with this in-progress mtask */
904 conn->mtask = NULL;
905 return 0;
908 static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
910 struct iscsi_session *session = conn->session;
913 * Check for iSCSI window and take care of CmdSN wrap-around
915 if (!iscsi_sna_lte(session->queued_cmdsn, session->max_cmdsn)) {
916 debug_scsi("iSCSI CmdSN closed. ExpCmdSn %u MaxCmdSN %u "
917 "CmdSN %u/%u\n", session->exp_cmdsn,
918 session->max_cmdsn, session->cmdsn,
919 session->queued_cmdsn);
920 return -ENOSPC;
922 return 0;
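/*
 * Example of the window check above (hypothetical values): with
 * queued_cmdsn = 3 and max_cmdsn = 2, iscsi_sna_lte() is false and the
 * caller gets -ENOSPC, so queuecommand returns SCSI_MLQUEUE_HOST_BUSY
 * until a response raises MaxCmdSN.  Across a CmdSN wrap, e.g.
 * queued_cmdsn = 0xfffffffd and max_cmdsn = 0x00000002, the RFC 1982
 * compare still reports the window as open.
 */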
925 static int iscsi_xmit_ctask(struct iscsi_conn *conn)
927 struct iscsi_cmd_task *ctask = conn->ctask;
928 int rc;
930 __iscsi_get_ctask(ctask);
931 spin_unlock_bh(&conn->session->lock);
932 rc = conn->session->tt->xmit_cmd_task(conn, ctask);
933 spin_lock_bh(&conn->session->lock);
934 __iscsi_put_ctask(ctask);
935 if (!rc)
936 /* done with this ctask */
937 conn->ctask = NULL;
938 return rc;
942 * iscsi_requeue_ctask - requeue ctask to run from session workqueue
943 * @ctask: ctask to requeue
945 * LLDs that need to run a ctask from the session workqueue should call
946 * this. The session lock must be held.
948 void iscsi_requeue_ctask(struct iscsi_cmd_task *ctask)
950 struct iscsi_conn *conn = ctask->conn;
952 list_move_tail(&ctask->running, &conn->requeue);
953 scsi_queue_work(conn->session->host, &conn->xmitwork);
955 EXPORT_SYMBOL_GPL(iscsi_requeue_ctask);
958 * iscsi_data_xmit - xmit any command into the scheduled connection
959 * @conn: iscsi connection
961 * Notes:
962 * The function can return -EAGAIN in which case the caller must
963 * reschedule it later or recover. A '0' return code means
964 * successful xmit.
966 static int iscsi_data_xmit(struct iscsi_conn *conn)
968 int rc = 0;
970 spin_lock_bh(&conn->session->lock);
971 if (unlikely(conn->suspend_tx)) {
972 debug_scsi("conn %d Tx suspended!\n", conn->id);
973 spin_unlock_bh(&conn->session->lock);
974 return -ENODATA;
977 if (conn->ctask) {
978 rc = iscsi_xmit_ctask(conn);
979 if (rc)
980 goto again;
983 if (conn->mtask) {
984 rc = iscsi_xmit_mtask(conn);
985 if (rc)
986 goto again;
990 * process mgmt pdus like nops before commands since we should
991 * only have one nop-out as a ping from us and targets should not
992 * overflow us with nop-ins
994 check_mgmt:
995 while (!list_empty(&conn->mgmtqueue)) {
996 conn->mtask = list_entry(conn->mgmtqueue.next,
997 struct iscsi_mgmt_task, running);
998 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
999 iscsi_free_mgmt_task(conn, conn->mtask);
1000 conn->mtask = NULL;
1001 continue;
1004 iscsi_prep_mtask(conn, conn->mtask);
1005 list_move_tail(conn->mgmtqueue.next, &conn->mgmt_run_list);
1006 rc = iscsi_xmit_mtask(conn);
1007 if (rc)
1008 goto again;
1011 /* process pending command queue */
1012 while (!list_empty(&conn->xmitqueue)) {
1013 if (conn->tmf_state == TMF_QUEUED)
1014 break;
1016 conn->ctask = list_entry(conn->xmitqueue.next,
1017 struct iscsi_cmd_task, running);
1018 if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
1019 fail_command(conn, conn->ctask, DID_IMM_RETRY << 16);
1020 continue;
1022 if (iscsi_prep_scsi_cmd_pdu(conn->ctask)) {
1023 fail_command(conn, conn->ctask, DID_ABORT << 16);
1024 continue;
1027 conn->ctask->state = ISCSI_TASK_RUNNING;
1028 list_move_tail(conn->xmitqueue.next, &conn->run_list);
1029 rc = iscsi_xmit_ctask(conn);
1030 if (rc)
1031 goto again;
1033 * we could continuously get new ctask requests so
1034 * we need to check the mgmt queue for nops that need to
1035 * be sent to avoid starvation
1037 if (!list_empty(&conn->mgmtqueue))
1038 goto check_mgmt;
1041 while (!list_empty(&conn->requeue)) {
1042 if (conn->session->fast_abort && conn->tmf_state != TMF_INITIAL)
1043 break;
1046 * we always do fastlogout - conn stop code will clean up.
1048 if (conn->session->state == ISCSI_STATE_LOGGING_OUT)
1049 break;
1051 conn->ctask = list_entry(conn->requeue.next,
1052 struct iscsi_cmd_task, running);
1053 conn->ctask->state = ISCSI_TASK_RUNNING;
1054 list_move_tail(conn->requeue.next, &conn->run_list);
1055 rc = iscsi_xmit_ctask(conn);
1056 if (rc)
1057 goto again;
1058 if (!list_empty(&conn->mgmtqueue))
1059 goto check_mgmt;
1061 spin_unlock_bh(&conn->session->lock);
1062 return -ENODATA;
1064 again:
1065 if (unlikely(conn->suspend_tx))
1066 rc = -ENODATA;
1067 spin_unlock_bh(&conn->session->lock);
1068 return rc;
1071 static void iscsi_xmitworker(struct work_struct *work)
1073 struct iscsi_conn *conn =
1074 container_of(work, struct iscsi_conn, xmitwork);
1075 int rc;
1077 * serialize Xmit worker on a per-connection basis.
1079 do {
1080 rc = iscsi_data_xmit(conn);
1081 } while (rc >= 0 || rc == -EAGAIN);
1084 enum {
1085 FAILURE_BAD_HOST = 1,
1086 FAILURE_SESSION_FAILED,
1087 FAILURE_SESSION_FREED,
1088 FAILURE_WINDOW_CLOSED,
1089 FAILURE_OOM,
1090 FAILURE_SESSION_TERMINATE,
1091 FAILURE_SESSION_IN_RECOVERY,
1092 FAILURE_SESSION_RECOVERY_TIMEOUT,
1093 FAILURE_SESSION_LOGGING_OUT,
1094 FAILURE_SESSION_NOT_READY,
1097 int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
1099 struct Scsi_Host *host;
1100 int reason = 0;
1101 struct iscsi_session *session;
1102 struct iscsi_conn *conn;
1103 struct iscsi_cmd_task *ctask = NULL;
1105 sc->scsi_done = done;
1106 sc->result = 0;
1107 sc->SCp.ptr = NULL;
1109 host = sc->device->host;
1110 spin_unlock(host->host_lock);
1112 session = iscsi_hostdata(host->hostdata);
1113 spin_lock(&session->lock);
1115 reason = iscsi_session_chkready(session_to_cls(session));
1116 if (reason) {
1117 sc->result = reason;
1118 goto fault;
1122 * ISCSI_STATE_FAILED is a temp. state. The recovery
1123 * code will decide what is best to do with command queued
1124 * during this time
1126 if (session->state != ISCSI_STATE_LOGGED_IN &&
1127 session->state != ISCSI_STATE_FAILED) {
1129 * to handle the race between when we set the recovery state
1130 * and block the session we requeue here (commands could
1131 * be entering our queuecommand while a block is starting
1132 * up because the block code is not locked)
1134 switch (session->state) {
1135 case ISCSI_STATE_IN_RECOVERY:
1136 reason = FAILURE_SESSION_IN_RECOVERY;
1137 sc->result = DID_IMM_RETRY << 16;
1138 break;
1139 case ISCSI_STATE_LOGGING_OUT:
1140 reason = FAILURE_SESSION_LOGGING_OUT;
1141 sc->result = DID_IMM_RETRY << 16;
1142 break;
1143 case ISCSI_STATE_RECOVERY_FAILED:
1144 reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
1145 sc->result = DID_NO_CONNECT << 16;
1146 break;
1147 case ISCSI_STATE_TERMINATE:
1148 reason = FAILURE_SESSION_TERMINATE;
1149 sc->result = DID_NO_CONNECT << 16;
1150 break;
1151 default:
1152 reason = FAILURE_SESSION_FREED;
1153 sc->result = DID_NO_CONNECT << 16;
1155 goto fault;
1158 conn = session->leadconn;
1159 if (!conn) {
1160 reason = FAILURE_SESSION_FREED;
1161 sc->result = DID_NO_CONNECT << 16;
1162 goto fault;
1165 if (iscsi_check_cmdsn_window_closed(conn)) {
1166 reason = FAILURE_WINDOW_CLOSED;
1167 goto reject;
1170 if (!__kfifo_get(session->cmdpool.queue, (void*)&ctask,
1171 sizeof(void*))) {
1172 reason = FAILURE_OOM;
1173 goto reject;
1175 session->queued_cmdsn++;
1177 sc->SCp.phase = session->age;
1178 sc->SCp.ptr = (char *)ctask;
1180 atomic_set(&ctask->refcount, 1);
1181 ctask->state = ISCSI_TASK_PENDING;
1182 ctask->conn = conn;
1183 ctask->sc = sc;
1184 INIT_LIST_HEAD(&ctask->running);
1186 list_add_tail(&ctask->running, &conn->xmitqueue);
1187 spin_unlock(&session->lock);
1189 scsi_queue_work(host, &conn->xmitwork);
1190 spin_lock(host->host_lock);
1191 return 0;
1193 reject:
1194 spin_unlock(&session->lock);
1195 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
1196 spin_lock(host->host_lock);
1197 return SCSI_MLQUEUE_HOST_BUSY;
1199 fault:
1200 spin_unlock(&session->lock);
1201 debug_scsi("iscsi: cmd 0x%x is not queued (%d)\n", sc->cmnd[0], reason);
1202 if (!scsi_bidi_cmnd(sc))
1203 scsi_set_resid(sc, scsi_bufflen(sc));
1204 else {
1205 scsi_out(sc)->resid = scsi_out(sc)->length;
1206 scsi_in(sc)->resid = scsi_in(sc)->length;
1208 sc->scsi_done(sc);
1209 spin_lock(host->host_lock);
1210 return 0;
1212 EXPORT_SYMBOL_GPL(iscsi_queuecommand);
1214 int iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
1216 if (depth > ISCSI_MAX_CMD_PER_LUN)
1217 depth = ISCSI_MAX_CMD_PER_LUN;
1218 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
1219 return sdev->queue_depth;
1221 EXPORT_SYMBOL_GPL(iscsi_change_queue_depth);
1223 void iscsi_session_recovery_timedout(struct iscsi_cls_session *cls_session)
1225 struct iscsi_session *session = class_to_transport_session(cls_session);
1227 spin_lock_bh(&session->lock);
1228 if (session->state != ISCSI_STATE_LOGGED_IN) {
1229 session->state = ISCSI_STATE_RECOVERY_FAILED;
1230 if (session->leadconn)
1231 wake_up(&session->leadconn->ehwait);
1233 spin_unlock_bh(&session->lock);
1235 EXPORT_SYMBOL_GPL(iscsi_session_recovery_timedout);
1237 int iscsi_eh_host_reset(struct scsi_cmnd *sc)
1239 struct Scsi_Host *host = sc->device->host;
1240 struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1241 struct iscsi_conn *conn = session->leadconn;
1243 mutex_lock(&session->eh_mutex);
1244 spin_lock_bh(&session->lock);
1245 if (session->state == ISCSI_STATE_TERMINATE) {
1246 failed:
1247 debug_scsi("failing host reset: session terminated "
1248 "[CID %d age %d]\n", conn->id, session->age);
1249 spin_unlock_bh(&session->lock);
1250 mutex_unlock(&session->eh_mutex);
1251 return FAILED;
1254 spin_unlock_bh(&session->lock);
1255 mutex_unlock(&session->eh_mutex);
1257 * we drop the lock here but the leadconn cannot be destroyed while
1258 * we are in the scsi eh
1260 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1262 debug_scsi("iscsi_eh_host_reset wait for relogin\n");
1263 wait_event_interruptible(conn->ehwait,
1264 session->state == ISCSI_STATE_TERMINATE ||
1265 session->state == ISCSI_STATE_LOGGED_IN ||
1266 session->state == ISCSI_STATE_RECOVERY_FAILED);
1267 if (signal_pending(current))
1268 flush_signals(current);
1270 mutex_lock(&session->eh_mutex);
1271 spin_lock_bh(&session->lock);
1272 if (session->state == ISCSI_STATE_LOGGED_IN)
1273 iscsi_session_printk(KERN_INFO, session,
1274 "host reset succeeded\n");
1275 else
1276 goto failed;
1277 spin_unlock_bh(&session->lock);
1278 mutex_unlock(&session->eh_mutex);
1279 return SUCCESS;
1281 EXPORT_SYMBOL_GPL(iscsi_eh_host_reset);
1283 static void iscsi_tmf_timedout(unsigned long data)
1285 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1286 struct iscsi_session *session = conn->session;
1288 spin_lock(&session->lock);
1289 if (conn->tmf_state == TMF_QUEUED) {
1290 conn->tmf_state = TMF_TIMEDOUT;
1291 debug_scsi("tmf timedout\n");
1292 /* unblock eh_abort() */
1293 wake_up(&conn->ehwait);
1295 spin_unlock(&session->lock);
1298 static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
1299 struct iscsi_tm *hdr, int age,
1300 int timeout)
1302 struct iscsi_session *session = conn->session;
1303 struct iscsi_mgmt_task *mtask;
1305 mtask = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)hdr,
1306 NULL, 0);
1307 if (!mtask) {
1308 spin_unlock_bh(&session->lock);
1309 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1310 spin_lock_bh(&session->lock);
1311 debug_scsi("tmf exec failure\n");
1312 return -EPERM;
1314 conn->tmfcmd_pdus_cnt++;
1315 conn->tmf_timer.expires = timeout * HZ + jiffies;
1316 conn->tmf_timer.function = iscsi_tmf_timedout;
1317 conn->tmf_timer.data = (unsigned long)conn;
1318 add_timer(&conn->tmf_timer);
1319 debug_scsi("tmf set timeout\n");
1321 spin_unlock_bh(&session->lock);
1322 mutex_unlock(&session->eh_mutex);
1323 scsi_queue_work(session->host, &conn->xmitwork);
1326 * block eh thread until:
1328 * 1) tmf response
1329 * 2) tmf timeout
1330 * 3) session is terminated or restarted or userspace has
1331 * given up on recovery
1333 wait_event_interruptible(conn->ehwait, age != session->age ||
1334 session->state != ISCSI_STATE_LOGGED_IN ||
1335 conn->tmf_state != TMF_QUEUED);
1336 if (signal_pending(current))
1337 flush_signals(current);
1338 del_timer_sync(&conn->tmf_timer);
1340 mutex_lock(&session->eh_mutex);
1341 spin_lock_bh(&session->lock);
1342 /* if the session drops it will clean up the mtask */
1343 if (age != session->age ||
1344 session->state != ISCSI_STATE_LOGGED_IN)
1345 return -ENOTCONN;
1346 return 0;
1350 * Fail commands. session lock held and recv side suspended and xmit
1351 * thread flushed
1353 static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
1354 int error)
1356 struct iscsi_cmd_task *ctask, *tmp;
1358 if (conn->ctask && (conn->ctask->sc->device->lun == lun || lun == -1))
1359 conn->ctask = NULL;
1361 /* flush pending */
1362 list_for_each_entry_safe(ctask, tmp, &conn->xmitqueue, running) {
1363 if (lun == ctask->sc->device->lun || lun == -1) {
1364 debug_scsi("failing pending sc %p itt 0x%x\n",
1365 ctask->sc, ctask->itt);
1366 fail_command(conn, ctask, error << 16);
1370 list_for_each_entry_safe(ctask, tmp, &conn->requeue, running) {
1371 if (lun == ctask->sc->device->lun || lun == -1) {
1372 debug_scsi("failing requeued sc %p itt 0x%x\n",
1373 ctask->sc, ctask->itt);
1374 fail_command(conn, ctask, error << 16);
1378 /* fail all other running */
1379 list_for_each_entry_safe(ctask, tmp, &conn->run_list, running) {
1380 if (lun == ctask->sc->device->lun || lun == -1) {
1381 debug_scsi("failing in progress sc %p itt 0x%x\n",
1382 ctask->sc, ctask->itt);
1383 fail_command(conn, ctask, DID_BUS_BUSY << 16);
1388 static void iscsi_suspend_tx(struct iscsi_conn *conn)
1390 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1391 scsi_flush_work(conn->session->host);
1394 static void iscsi_start_tx(struct iscsi_conn *conn)
1396 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
1397 scsi_queue_work(conn->session->host, &conn->xmitwork);
1400 static enum scsi_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
1402 struct iscsi_cls_session *cls_session;
1403 struct iscsi_session *session;
1404 struct iscsi_conn *conn;
1405 enum scsi_eh_timer_return rc = EH_NOT_HANDLED;
1407 cls_session = starget_to_session(scsi_target(scmd->device));
1408 session = class_to_transport_session(cls_session);
1410 debug_scsi("scsi cmd %p timedout\n", scmd);
1412 spin_lock(&session->lock);
1413 if (session->state != ISCSI_STATE_LOGGED_IN) {
1415 * We are probably in the middle of iscsi recovery so let
1416 * that complete and handle the error.
1418 rc = EH_RESET_TIMER;
1419 goto done;
1422 conn = session->leadconn;
1423 if (!conn) {
1424 /* In the middle of shutting down */
1425 rc = EH_RESET_TIMER;
1426 goto done;
1429 if (!conn->recv_timeout && !conn->ping_timeout)
1430 goto done;
1432 * if the ping timedout then we are in the middle of cleaning up
1433 * and can let the iscsi eh handle it
1435 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
1436 (conn->ping_timeout * HZ), jiffies))
1437 rc = EH_RESET_TIMER;
1439 * if we are about to check the transport then give the command
1440 * more time
1442 if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
1443 jiffies))
1444 rc = EH_RESET_TIMER;
1445 /* if in the middle of checking the transport then give us more time */
1446 if (conn->ping_mtask)
1447 rc = EH_RESET_TIMER;
1448 done:
1449 spin_unlock(&session->lock);
1450 debug_scsi("return %s\n", rc == EH_RESET_TIMER ? "timer reset" : "nh");
1451 return rc;
1454 static void iscsi_check_transport_timeouts(unsigned long data)
1456 struct iscsi_conn *conn = (struct iscsi_conn *)data;
1457 struct iscsi_session *session = conn->session;
1458 unsigned long recv_timeout, next_timeout = 0, last_recv;
1460 spin_lock(&session->lock);
1461 if (session->state != ISCSI_STATE_LOGGED_IN)
1462 goto done;
1464 recv_timeout = conn->recv_timeout;
1465 if (!recv_timeout)
1466 goto done;
1468 recv_timeout *= HZ;
1469 last_recv = conn->last_recv;
1470 if (conn->ping_mtask &&
1471 time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
1472 jiffies)) {
1473 iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
1474 "expired, last rx %lu, last ping %lu, "
1475 "now %lu\n", conn->ping_timeout, last_recv,
1476 conn->last_ping, jiffies);
1477 spin_unlock(&session->lock);
1478 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1479 return;
1482 if (time_before_eq(last_recv + recv_timeout, jiffies)) {
1483 /* send a ping to try to provoke some traffic */
1484 debug_scsi("Sending nopout as ping on conn %p\n", conn);
1485 iscsi_send_nopout(conn, NULL);
1486 next_timeout = conn->last_ping + (conn->ping_timeout * HZ);
1487 } else
1488 next_timeout = last_recv + recv_timeout;
1490 debug_scsi("Setting next tmo %lu\n", next_timeout);
1491 mod_timer(&conn->transport_timer, next_timeout);
1492 done:
1493 spin_unlock(&session->lock);
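/*
 * Timeline example for the ping machinery above (hypothetical settings,
 * recv_timeout = ping_timeout = 5 secs): if no PDU has been received for
 * 5 seconds a NOP-Out ping is sent and the timer is re-armed for
 * last_ping + 5 secs; if the target answers, the NOP-In handling in
 * __iscsi_complete_pdu() pushes the timer out again, otherwise the next
 * expiry finds ping_mtask still outstanding and escalates to
 * iscsi_conn_failure().
 */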
1496 static void iscsi_prep_abort_task_pdu(struct iscsi_cmd_task *ctask,
1497 struct iscsi_tm *hdr)
1499 memset(hdr, 0, sizeof(*hdr));
1500 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1501 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK & ISCSI_FLAG_TM_FUNC_MASK;
1502 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1503 memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
1504 hdr->rtt = ctask->hdr->itt;
1505 hdr->refcmdsn = ctask->hdr->cmdsn;
1508 int iscsi_eh_abort(struct scsi_cmnd *sc)
1510 struct Scsi_Host *host = sc->device->host;
1511 struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1512 struct iscsi_conn *conn;
1513 struct iscsi_cmd_task *ctask;
1514 struct iscsi_tm *hdr;
1515 int rc, age;
1517 mutex_lock(&session->eh_mutex);
1518 spin_lock_bh(&session->lock);
1520 * if session was ISCSI_STATE_IN_RECOVERY then we may not have
1521 * got the command.
1523 if (!sc->SCp.ptr) {
1524 debug_scsi("sc never reached iscsi layer or it completed.\n");
1525 spin_unlock_bh(&session->lock);
1526 mutex_unlock(&session->eh_mutex);
1527 return SUCCESS;
1531 * If we are not logged in or we have started a new session
1532 * then let the host reset code handle this
1534 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN ||
1535 sc->SCp.phase != session->age) {
1536 spin_unlock_bh(&session->lock);
1537 mutex_unlock(&session->eh_mutex);
1538 return FAILED;
1541 conn = session->leadconn;
1542 conn->eh_abort_cnt++;
1543 age = session->age;
1545 ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
1546 debug_scsi("aborting [sc %p itt 0x%x]\n", sc, ctask->itt);
1548 /* ctask completed before time out */
1549 if (!ctask->sc) {
1550 debug_scsi("sc completed while abort in progress\n");
1551 goto success;
1554 if (ctask->state == ISCSI_TASK_PENDING) {
1555 fail_command(conn, ctask, DID_ABORT << 16);
1556 goto success;
1559 /* only have one tmf outstanding at a time */
1560 if (conn->tmf_state != TMF_INITIAL)
1561 goto failed;
1562 conn->tmf_state = TMF_QUEUED;
1564 hdr = &conn->tmhdr;
1565 iscsi_prep_abort_task_pdu(ctask, hdr);
1567 if (iscsi_exec_task_mgmt_fn(conn, hdr, age, session->abort_timeout)) {
1568 rc = FAILED;
1569 goto failed;
1572 switch (conn->tmf_state) {
1573 case TMF_SUCCESS:
1574 spin_unlock_bh(&session->lock);
1575 iscsi_suspend_tx(conn);
1577 * clean up task if aborted. grab the recv lock as a writer
1579 write_lock_bh(conn->recv_lock);
1580 spin_lock(&session->lock);
1581 fail_command(conn, ctask, DID_ABORT << 16);
1582 conn->tmf_state = TMF_INITIAL;
1583 spin_unlock(&session->lock);
1584 write_unlock_bh(conn->recv_lock);
1585 iscsi_start_tx(conn);
1586 goto success_unlocked;
1587 case TMF_TIMEDOUT:
1588 spin_unlock_bh(&session->lock);
1589 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1590 goto failed_unlocked;
1591 case TMF_NOT_FOUND:
1592 if (!sc->SCp.ptr) {
1593 conn->tmf_state = TMF_INITIAL;
1594 /* ctask completed before tmf abort response */
1595 debug_scsi("sc completed while abort in progress\n");
1596 goto success;
1598 /* fall through */
1599 default:
1600 conn->tmf_state = TMF_INITIAL;
1601 goto failed;
1604 success:
1605 spin_unlock_bh(&session->lock);
1606 success_unlocked:
1607 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
1608 mutex_unlock(&session->eh_mutex);
1609 return SUCCESS;
1611 failed:
1612 spin_unlock_bh(&session->lock);
1613 failed_unlocked:
1614 debug_scsi("abort failed [sc %p itt 0x%x]\n", sc,
1615 ctask ? ctask->itt : 0);
1616 mutex_unlock(&session->eh_mutex);
1617 return FAILED;
1619 EXPORT_SYMBOL_GPL(iscsi_eh_abort);
1621 static void iscsi_prep_lun_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
1623 memset(hdr, 0, sizeof(*hdr));
1624 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
1625 hdr->flags = ISCSI_TM_FUNC_LOGICAL_UNIT_RESET & ISCSI_FLAG_TM_FUNC_MASK;
1626 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
1627 int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
1628 hdr->rtt = RESERVED_ITT;
1631 int iscsi_eh_device_reset(struct scsi_cmnd *sc)
1633 struct Scsi_Host *host = sc->device->host;
1634 struct iscsi_session *session = iscsi_hostdata(host->hostdata);
1635 struct iscsi_conn *conn;
1636 struct iscsi_tm *hdr;
1637 int rc = FAILED;
1639 debug_scsi("LU Reset [sc %p lun %u]\n", sc, sc->device->lun);
1641 mutex_lock(&session->eh_mutex);
1642 spin_lock_bh(&session->lock);
1644 * Just check if we are not logged in. We cannot check for
1645 * the phase because the reset could come from an ioctl.
1647 if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
1648 goto unlock;
1649 conn = session->leadconn;
1651 /* only have one tmf outstanding at a time */
1652 if (conn->tmf_state != TMF_INITIAL)
1653 goto unlock;
1654 conn->tmf_state = TMF_QUEUED;
1656 hdr = &conn->tmhdr;
1657 iscsi_prep_lun_reset_pdu(sc, hdr);
1659 if (iscsi_exec_task_mgmt_fn(conn, hdr, session->age,
1660 session->lu_reset_timeout)) {
1661 rc = FAILED;
1662 goto unlock;
1665 switch (conn->tmf_state) {
1666 case TMF_SUCCESS:
1667 break;
1668 case TMF_TIMEDOUT:
1669 spin_unlock_bh(&session->lock);
1670 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1671 goto done;
1672 default:
1673 conn->tmf_state = TMF_INITIAL;
1674 goto unlock;
1677 rc = SUCCESS;
1678 spin_unlock_bh(&session->lock);
1680 iscsi_suspend_tx(conn);
1681 /* need to grab the recv lock then session lock */
1682 write_lock_bh(conn->recv_lock);
1683 spin_lock(&session->lock);
1684 fail_all_commands(conn, sc->device->lun, DID_ERROR);
1685 conn->tmf_state = TMF_INITIAL;
1686 spin_unlock(&session->lock);
1687 write_unlock_bh(conn->recv_lock);
1689 iscsi_start_tx(conn);
1690 goto done;
1692 unlock:
1693 spin_unlock_bh(&session->lock);
1694 done:
1695 debug_scsi("iscsi_eh_device_reset %s\n",
1696 rc == SUCCESS ? "SUCCESS" : "FAILED");
1697 mutex_unlock(&session->eh_mutex);
1698 return rc;
1700 EXPORT_SYMBOL_GPL(iscsi_eh_device_reset);
1703 * Pre-allocate a pool of @max items of @item_size. By default, the pool
1704 * should be accessed via kfifo_{get,put} on q->queue.
1705 * Optionally, the caller can obtain the array of object pointers
1706 * by passing in a non-NULL @items pointer
1709 iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
1711 int i, num_arrays = 1;
1713 memset(q, 0, sizeof(*q));
1715 q->max = max;
1717 /* If the user passed an items pointer, he wants a copy of
1718 * the array. */
1719 if (items)
1720 num_arrays++;
1721 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
1722 if (q->pool == NULL)
1723 goto enomem;
1725 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
1726 GFP_KERNEL, NULL);
1727 if (q->queue == ERR_PTR(-ENOMEM))
1728 goto enomem;
1730 for (i = 0; i < max; i++) {
1731 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
1732 if (q->pool[i] == NULL) {
1733 q->max = i;
1734 goto enomem;
1736 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
1739 if (items) {
1740 *items = q->pool + max;
1741 memcpy(*items, q->pool, max * sizeof(void *));
1744 return 0;
1746 enomem:
1747 iscsi_pool_free(q);
1748 return -ENOMEM;
1750 EXPORT_SYMBOL_GPL(iscsi_pool_init);
1752 void iscsi_pool_free(struct iscsi_pool *q)
1754 int i;
1756 for (i = 0; i < q->max; i++)
1757 kfree(q->pool[i]);
1758 if (q->pool)
1759 kfree(q->pool);
1761 EXPORT_SYMBOL_GPL(iscsi_pool_free);
1764 * iSCSI Session's hostdata organization:
1766 * *------------------* <== hostdata_session(host->hostdata)
1767 * | ptr to class sess|
1768 * |------------------| <== iscsi_hostdata(host->hostdata)
1769 * | iscsi_session |
1770 * *------------------*
1773 #define hostdata_privsize(_sz) (sizeof(unsigned long) + _sz + \
1774 _sz % sizeof(unsigned long))
1776 #define hostdata_session(_hostdata) (iscsi_ptr(*(unsigned long *)_hostdata))
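/*
 * A minimal sketch of the layout above, assuming iscsi_hostdata() (defined
 * in the libiscsi header) simply skips the leading pointer slot:
 * host->hostdata begins with one unsigned long holding the
 * iscsi_cls_session pointer, written at the end of iscsi_session_setup()
 * via
 *
 *	*(unsigned long *)shost->hostdata = (unsigned long)cls_session;
 *
 * and read back by hostdata_session().  The struct iscsi_session itself
 * follows that slot, which is why hostdata_privsize() adds
 * sizeof(unsigned long) (plus padding) on top of the session size.
 */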
1779 * iscsi_session_setup - create iscsi cls session and host and session
1780 * @scsit: scsi transport template
1781 * @iscsit: iscsi transport template
1782 * @cmds_max: scsi host can queue
1783 * @qdepth: scsi host cmds per lun
1784 * @cmd_task_size: LLD ctask private data size
1785 * @mgmt_task_size: LLD mtask private data size
1786 * @initial_cmdsn: initial CmdSN
1787 * @hostno: allocated host number, returned to the caller
1789 * This can be used by software iscsi_transports that allocate
1790 * a session per scsi host.
1792 struct iscsi_cls_session *
1793 iscsi_session_setup(struct iscsi_transport *iscsit,
1794 struct scsi_transport_template *scsit,
1795 uint16_t cmds_max, uint16_t qdepth,
1796 int cmd_task_size, int mgmt_task_size,
1797 uint32_t initial_cmdsn, uint32_t *hostno)
1799 struct Scsi_Host *shost;
1800 struct iscsi_session *session;
1801 struct iscsi_cls_session *cls_session;
1802 int cmd_i;
1804 if (qdepth > ISCSI_MAX_CMD_PER_LUN || qdepth < 1) {
1805 if (qdepth != 0)
1806 printk(KERN_ERR "iscsi: invalid queue depth of %d. "
1807 "Queue depth must be between 1 and %d.\n",
1808 qdepth, ISCSI_MAX_CMD_PER_LUN);
1809 qdepth = ISCSI_DEF_CMD_PER_LUN;
1812 if (!is_power_of_2(cmds_max) || cmds_max >= ISCSI_MGMT_ITT_OFFSET ||
1813 cmds_max < 2) {
1814 if (cmds_max != 0)
1815 printk(KERN_ERR "iscsi: invalid can_queue of %d. "
1816 "can_queue must be a power of 2 and between "
1817 "2 and %d - setting to %d.\n", cmds_max,
1818 ISCSI_MGMT_ITT_OFFSET, ISCSI_DEF_XMIT_CMDS_MAX);
1819 cmds_max = ISCSI_DEF_XMIT_CMDS_MAX;
1822 shost = scsi_host_alloc(iscsit->host_template,
1823 hostdata_privsize(sizeof(*session)));
1824 if (!shost)
1825 return NULL;
1827 /* the iscsi layer takes one task for reserve */
1828 shost->can_queue = cmds_max - 1;
1829 shost->cmd_per_lun = qdepth;
1830 shost->max_id = 1;
1831 shost->max_channel = 0;
1832 shost->max_lun = iscsit->max_lun;
1833 shost->max_cmd_len = iscsit->max_cmd_len;
1834 shost->transportt = scsit;
1835 shost->transportt->create_work_queue = 1;
1836 shost->transportt->eh_timed_out = iscsi_eh_cmd_timed_out;
1837 *hostno = shost->host_no;
1839 session = iscsi_hostdata(shost->hostdata);
1840 memset(session, 0, sizeof(struct iscsi_session));
1841 session->host = shost;
1842 session->state = ISCSI_STATE_FREE;
1843 session->fast_abort = 1;
1844 session->lu_reset_timeout = 15;
1845 session->abort_timeout = 10;
1846 session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
1847 session->cmds_max = cmds_max;
1848 session->queued_cmdsn = session->cmdsn = initial_cmdsn;
1849 session->exp_cmdsn = initial_cmdsn + 1;
1850 session->max_cmdsn = initial_cmdsn + 1;
1851 session->max_r2t = 1;
1852 session->tt = iscsit;
1853 mutex_init(&session->eh_mutex);
1855 /* initialize SCSI PDU commands pool */
1856 if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
1857 (void***)&session->cmds,
1858 cmd_task_size + sizeof(struct iscsi_cmd_task)))
1859 goto cmdpool_alloc_fail;
1861 /* pre-format cmds pool with ITT */
1862 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
1863 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
1865 if (cmd_task_size)
1866 ctask->dd_data = &ctask[1];
1867 ctask->itt = cmd_i;
1868 INIT_LIST_HEAD(&ctask->running);
1871 spin_lock_init(&session->lock);
1873 /* initialize immediate command pool */
1874 if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
1875 (void***)&session->mgmt_cmds,
1876 mgmt_task_size + sizeof(struct iscsi_mgmt_task)))
1877 goto mgmtpool_alloc_fail;
1880 /* pre-format immediate cmds pool with ITT */
1881 for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
1882 struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
1884 if (mgmt_task_size)
1885 mtask->dd_data = &mtask[1];
1886 mtask->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
1887 INIT_LIST_HEAD(&mtask->running);
1890 if (scsi_add_host(shost, NULL))
1891 goto add_host_fail;
1893 if (!try_module_get(iscsit->owner))
1894 goto cls_session_fail;
1896 cls_session = iscsi_create_session(shost, iscsit, 0);
1897 if (!cls_session)
1898 goto module_put;
1899 *(unsigned long*)shost->hostdata = (unsigned long)cls_session;
1901 return cls_session;
1903 module_put:
1904 module_put(iscsit->owner);
1905 cls_session_fail:
1906 scsi_remove_host(shost);
1907 add_host_fail:
1908 iscsi_pool_free(&session->mgmtpool);
1909 mgmtpool_alloc_fail:
1910 iscsi_pool_free(&session->cmdpool);
1911 cmdpool_alloc_fail:
1912 scsi_host_put(shost);
1913 return NULL;
1915 EXPORT_SYMBOL_GPL(iscsi_session_setup);
1918 * iscsi_session_teardown - destroy session, host, and cls_session
1919 * @cls_session: iscsi session
1921 * This can be used by software iscsi_transports that allocate
1922 * a session per scsi host.
1924 void iscsi_session_teardown(struct iscsi_cls_session *cls_session)
1926 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
1927 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
1928 struct module *owner = cls_session->transport->owner;
1930 iscsi_remove_session(cls_session);
1931 scsi_remove_host(shost);
1933 iscsi_pool_free(&session->mgmtpool);
1934 iscsi_pool_free(&session->cmdpool);
1936 kfree(session->password);
1937 kfree(session->password_in);
1938 kfree(session->username);
1939 kfree(session->username_in);
1940 kfree(session->targetname);
1941 kfree(session->netdev);
1942 kfree(session->hwaddress);
1943 kfree(session->initiatorname);
1945 iscsi_free_session(cls_session);
1946 scsi_host_put(shost);
1947 module_put(owner);
1949 EXPORT_SYMBOL_GPL(iscsi_session_teardown);
1952 * iscsi_conn_setup - create iscsi_cls_conn and iscsi_conn
1953 * @cls_session: iscsi_cls_session
1954 * @conn_idx: cid
1956 struct iscsi_cls_conn *
1957 iscsi_conn_setup(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
1959 struct iscsi_session *session = class_to_transport_session(cls_session);
1960 struct iscsi_conn *conn;
1961 struct iscsi_cls_conn *cls_conn;
1962 char *data;
1964 cls_conn = iscsi_create_conn(cls_session, conn_idx);
1965 if (!cls_conn)
1966 return NULL;
1967 conn = cls_conn->dd_data;
1968 memset(conn, 0, sizeof(*conn));
1970 conn->session = session;
1971 conn->cls_conn = cls_conn;
1972 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
1973 conn->id = conn_idx;
1974 conn->exp_statsn = 0;
1975 conn->tmf_state = TMF_INITIAL;
1977 init_timer(&conn->transport_timer);
1978 conn->transport_timer.data = (unsigned long)conn;
1979 conn->transport_timer.function = iscsi_check_transport_timeouts;
1981 INIT_LIST_HEAD(&conn->run_list);
1982 INIT_LIST_HEAD(&conn->mgmt_run_list);
1983 INIT_LIST_HEAD(&conn->mgmtqueue);
1984 INIT_LIST_HEAD(&conn->xmitqueue);
1985 INIT_LIST_HEAD(&conn->requeue);
1986 INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
1988 /* allocate login_mtask used for the login/text sequences */
1989 spin_lock_bh(&session->lock);
1990 if (!__kfifo_get(session->mgmtpool.queue,
1991 (void*)&conn->login_mtask,
1992 sizeof(void*))) {
1993 spin_unlock_bh(&session->lock);
1994 goto login_mtask_alloc_fail;
1996 spin_unlock_bh(&session->lock);
1998 data = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN, GFP_KERNEL);
1999 if (!data)
2000 goto login_mtask_data_alloc_fail;
2001 conn->login_mtask->data = conn->data = data;
2003 init_timer(&conn->tmf_timer);
2004 init_waitqueue_head(&conn->ehwait);
2006 return cls_conn;
2008 login_mtask_data_alloc_fail:
2009 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2010 sizeof(void*));
2011 login_mtask_alloc_fail:
2012 iscsi_destroy_conn(cls_conn);
2013 return NULL;
2014 }
2015 EXPORT_SYMBOL_GPL(iscsi_conn_setup);
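/*
 * Illustrative sketch, not part of the original file: how an LLD's
 * .create_conn hook might wrap iscsi_conn_setup(). The hook name and
 * struct example_lld_conn are hypothetical; the assumption is that the
 * driver keeps its per-connection state behind the iscsi_conn's dd_data
 * pointer, as the software transports of this era do.
 */
struct example_lld_conn {
	struct iscsi_conn *iscsi_conn;
	/* sockets, digest state, etc. would live here */
};

static struct iscsi_cls_conn *
example_lld_create_conn(struct iscsi_cls_session *cls_session, uint32_t cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct example_lld_conn *lld_conn;

	cls_conn = iscsi_conn_setup(cls_session, cid);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;	/* the zeroed struct iscsi_conn */

	lld_conn = kzalloc(sizeof(*lld_conn), GFP_KERNEL);
	if (!lld_conn) {
		iscsi_conn_teardown(cls_conn);
		return NULL;
	}
	lld_conn->iscsi_conn = conn;
	/* assumption: LLD-private state hangs off the iscsi_conn */
	conn->dd_data = lld_conn;
	return cls_conn;
}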
2017 /**
2018 * iscsi_conn_teardown - teardown iscsi connection
2019 * @cls_conn: iscsi class connection
2020 *
2021 * TODO: we may need to make this into a two step process
2022 * like scsi-mls remove + put host
2023 */
2024 void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2025 {
2026 struct iscsi_conn *conn = cls_conn->dd_data;
2027 struct iscsi_session *session = conn->session;
2028 unsigned long flags;
2030 del_timer_sync(&conn->transport_timer);
2032 spin_lock_bh(&session->lock);
2033 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2034 if (session->leadconn == conn) {
2035 /*
2036 * leading connection? then give up on recovery.
2037 */
2038 session->state = ISCSI_STATE_TERMINATE;
2039 wake_up(&conn->ehwait);
2040 }
2041 spin_unlock_bh(&session->lock);
2043 /*
2044 * Block until all in-progress commands for this connection
2045 * time out or fail.
2046 */
2047 for (;;) {
2048 spin_lock_irqsave(session->host->host_lock, flags);
2049 if (!session->host->host_busy) { /* OK for ERL == 0 */
2050 spin_unlock_irqrestore(session->host->host_lock, flags);
2051 break;
2052 }
2053 spin_unlock_irqrestore(session->host->host_lock, flags);
2054 msleep_interruptible(500);
2055 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2056 "host_busy %d host_failed %d\n",
2057 session->host->host_busy,
2058 session->host->host_failed);
2059 /*
2060 * force eh_abort() to unblock
2061 */
2062 wake_up(&conn->ehwait);
2063 }
2065 /* flush queued up work because we free the connection below */
2066 iscsi_suspend_tx(conn);
2068 spin_lock_bh(&session->lock);
2069 kfree(conn->data);
2070 kfree(conn->persistent_address);
2071 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2072 sizeof(void*));
2073 if (session->leadconn == conn)
2074 session->leadconn = NULL;
2075 spin_unlock_bh(&session->lock);
2077 iscsi_destroy_conn(cls_conn);
2078 }
2079 EXPORT_SYMBOL_GPL(iscsi_conn_teardown);
2081 int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
2082 {
2083 struct iscsi_conn *conn = cls_conn->dd_data;
2084 struct iscsi_session *session = conn->session;
2086 if (!session) {
2087 iscsi_conn_printk(KERN_ERR, conn,
2088 "can't start unbound connection\n");
2089 return -EPERM;
2090 }
2092 if ((session->imm_data_en || !session->initial_r2t_en) &&
2093 session->first_burst > session->max_burst) {
2094 iscsi_conn_printk(KERN_INFO, conn, "invalid burst lengths: "
2095 "first_burst %d max_burst %d\n",
2096 session->first_burst, session->max_burst);
2097 return -EINVAL;
2098 }
2100 if (conn->ping_timeout && !conn->recv_timeout) {
2101 iscsi_conn_printk(KERN_ERR, conn, "invalid recv timeout of "
2102 "zero. Using 5 seconds.\n");
2103 conn->recv_timeout = 5;
2104 }
2106 if (conn->recv_timeout && !conn->ping_timeout) {
2107 iscsi_conn_printk(KERN_ERR, conn, "invalid ping timeout of "
2108 "zero. Using 5 seconds.\n");
2109 conn->ping_timeout = 5;
2110 }
2112 spin_lock_bh(&session->lock);
2113 conn->c_stage = ISCSI_CONN_STARTED;
2114 session->state = ISCSI_STATE_LOGGED_IN;
2115 session->queued_cmdsn = session->cmdsn;
2117 conn->last_recv = jiffies;
2118 conn->last_ping = jiffies;
2119 if (conn->recv_timeout && conn->ping_timeout)
2120 mod_timer(&conn->transport_timer,
2121 jiffies + (conn->recv_timeout * HZ));
2123 switch(conn->stop_stage) {
2124 case STOP_CONN_RECOVER:
2125 /*
2126 * unblock eh_abort() if it is blocked. re-try all
2127 * commands after successful recovery
2128 */
2129 conn->stop_stage = 0;
2130 conn->tmf_state = TMF_INITIAL;
2131 session->age++;
2132 if (session->age == 16)
2133 session->age = 0;
2134 break;
2135 case STOP_CONN_TERM:
2136 conn->stop_stage = 0;
2137 break;
2138 default:
2139 break;
2140 }
2141 spin_unlock_bh(&session->lock);
2143 iscsi_unblock_session(session_to_cls(session));
2144 wake_up(&conn->ehwait);
2145 return 0;
2146 }
2147 EXPORT_SYMBOL_GPL(iscsi_conn_start);
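/*
 * Illustrative sketch, not part of the original file: the timeout fixups
 * above in isolation. When only one of the NOP-Out (ping) and receive
 * timeouts is configured, iscsi_conn_start() falls back to 5 seconds for
 * the missing one before arming conn->transport_timer. The helper name and
 * the values are hypothetical; both timeouts are in seconds.
 */
static void example_show_timeout_fixup(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	conn->ping_timeout = 10;	/* send a NOP-Out after 10s of silence */
	conn->recv_timeout = 0;		/* left unset */
	/* after iscsi_conn_start(cls_conn), conn->recv_timeout == 5 */
}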
2149 static void
2150 flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
2151 {
2152 struct iscsi_mgmt_task *mtask, *tmp;
2154 /* handle pending */
2155 list_for_each_entry_safe(mtask, tmp, &conn->mgmtqueue, running) {
2156 debug_scsi("flushing pending mgmt task itt 0x%x\n", mtask->itt);
2157 iscsi_free_mgmt_task(conn, mtask);
2158 }
2160 /* handle running */
2161 list_for_each_entry_safe(mtask, tmp, &conn->mgmt_run_list, running) {
2162 debug_scsi("flushing running mgmt task itt 0x%x\n", mtask->itt);
2163 iscsi_free_mgmt_task(conn, mtask);
2164 }
2166 conn->mtask = NULL;
2167 }
2169 static void iscsi_start_session_recovery(struct iscsi_session *session,
2170 struct iscsi_conn *conn, int flag)
2171 {
2172 int old_stop_stage;
2174 del_timer_sync(&conn->transport_timer);
2176 mutex_lock(&session->eh_mutex);
2177 spin_lock_bh(&session->lock);
2178 if (conn->stop_stage == STOP_CONN_TERM) {
2179 spin_unlock_bh(&session->lock);
2180 mutex_unlock(&session->eh_mutex);
2181 return;
2182 }
2184 /*
2185 * The LLD either freed/unset the lock on us, or userspace called
2186 * stop but did not create a proper connection (connection was never
2187 * bound or it was unbound then stop was called).
2188 */
2189 if (!conn->recv_lock) {
2190 spin_unlock_bh(&session->lock);
2191 mutex_unlock(&session->eh_mutex);
2192 return;
2193 }
2195 /*
2196 * When this is called for the in_login state, we only want to clean
2197 * up the login task and connection. We do not need to block and set
2198 * the recovery state again
2199 */
2200 if (flag == STOP_CONN_TERM)
2201 session->state = ISCSI_STATE_TERMINATE;
2202 else if (conn->stop_stage != STOP_CONN_RECOVER)
2203 session->state = ISCSI_STATE_IN_RECOVERY;
2205 old_stop_stage = conn->stop_stage;
2206 conn->stop_stage = flag;
2207 conn->c_stage = ISCSI_CONN_STOPPED;
2208 spin_unlock_bh(&session->lock);
2210 iscsi_suspend_tx(conn);
2212 write_lock_bh(conn->recv_lock);
2213 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2214 write_unlock_bh(conn->recv_lock);
2216 /*
2217 * for connection level recovery we should not calculate
2218 * header digest. conn->hdr_size used for optimization
2219 * in hdr_extract() and will be re-negotiated at
2220 * set_param() time.
2221 */
2222 if (flag == STOP_CONN_RECOVER) {
2223 conn->hdrdgst_en = 0;
2224 conn->datadgst_en = 0;
2225 if (session->state == ISCSI_STATE_IN_RECOVERY &&
2226 old_stop_stage != STOP_CONN_RECOVER) {
2227 debug_scsi("blocking session\n");
2228 iscsi_block_session(session_to_cls(session));
2229 }
2230 }
2232 /*
2233 * flush queues.
2234 */
2235 spin_lock_bh(&session->lock);
2236 fail_all_commands(conn, -1,
2237 flag == STOP_CONN_RECOVER ? DID_BUS_BUSY : DID_ERROR);
2238 flush_control_queues(session, conn);
2239 spin_unlock_bh(&session->lock);
2240 mutex_unlock(&session->eh_mutex);
2241 }
2243 void iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
2244 {
2245 struct iscsi_conn *conn = cls_conn->dd_data;
2246 struct iscsi_session *session = conn->session;
2248 switch (flag) {
2249 case STOP_CONN_RECOVER:
2250 case STOP_CONN_TERM:
2251 iscsi_start_session_recovery(session, conn, flag);
2252 break;
2253 default:
2254 iscsi_conn_printk(KERN_ERR, conn,
2255 "invalid stop flag %d\n", flag);
2256 }
2257 }
2258 EXPORT_SYMBOL_GPL(iscsi_conn_stop);
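/*
 * Illustrative sketch, not part of the original file: a pass-through
 * .stop_conn hook for an LLD with no private cleanup of its own. Only
 * STOP_CONN_RECOVER and STOP_CONN_TERM are acted on by iscsi_conn_stop()
 * above; anything else just logs an error. The hook name is hypothetical.
 */
static void example_lld_stop_conn(struct iscsi_cls_conn *cls_conn, int flag)
{
	/* LLD-specific quiescing (sockets, hardware queues, ...) would go first */
	iscsi_conn_stop(cls_conn, flag);
}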
2260 int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
2261 struct iscsi_cls_conn *cls_conn, int is_leading)
2262 {
2263 struct iscsi_session *session = class_to_transport_session(cls_session);
2264 struct iscsi_conn *conn = cls_conn->dd_data;
2266 spin_lock_bh(&session->lock);
2267 if (is_leading)
2268 session->leadconn = conn;
2269 spin_unlock_bh(&session->lock);
2271 /*
2272 * Unblock xmitworker(), Login Phase will pass through.
2273 */
2274 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
2275 clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
2276 return 0;
2277 }
2278 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
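/*
 * Illustrative sketch, not part of the original file: the order in which a
 * software transport, driven by the userspace daemon, typically exercises
 * these entry points once login has completed on the wire: bind the leading
 * connection, push the negotiated parameters (via iscsi_set_param(), just
 * below), then start. The function name is hypothetical.
 */
static int example_login_complete(struct iscsi_cls_session *cls_session,
				  struct iscsi_cls_conn *cls_conn)
{
	int rc;

	rc = iscsi_conn_bind(cls_session, cls_conn, 1 /* is_leading */);
	if (rc)
		return rc;
	/* ... iscsi_set_param() calls for the negotiated values go here ... */
	return iscsi_conn_start(cls_conn);
}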
2281 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
2282 enum iscsi_param param, char *buf, int buflen)
2283 {
2284 struct iscsi_conn *conn = cls_conn->dd_data;
2285 struct iscsi_session *session = conn->session;
2286 uint32_t value;
2288 switch(param) {
2289 case ISCSI_PARAM_FAST_ABORT:
2290 sscanf(buf, "%d", &session->fast_abort);
2291 break;
2292 case ISCSI_PARAM_ABORT_TMO:
2293 sscanf(buf, "%d", &session->abort_timeout);
2294 break;
2295 case ISCSI_PARAM_LU_RESET_TMO:
2296 sscanf(buf, "%d", &session->lu_reset_timeout);
2297 break;
2298 case ISCSI_PARAM_PING_TMO:
2299 sscanf(buf, "%d", &conn->ping_timeout);
2300 break;
2301 case ISCSI_PARAM_RECV_TMO:
2302 sscanf(buf, "%d", &conn->recv_timeout);
2303 break;
2304 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2305 sscanf(buf, "%d", &conn->max_recv_dlength);
2306 break;
2307 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2308 sscanf(buf, "%d", &conn->max_xmit_dlength);
2309 break;
2310 case ISCSI_PARAM_HDRDGST_EN:
2311 sscanf(buf, "%d", &conn->hdrdgst_en);
2312 break;
2313 case ISCSI_PARAM_DATADGST_EN:
2314 sscanf(buf, "%d", &conn->datadgst_en);
2315 break;
2316 case ISCSI_PARAM_INITIAL_R2T_EN:
2317 sscanf(buf, "%d", &session->initial_r2t_en);
2318 break;
2319 case ISCSI_PARAM_MAX_R2T:
2320 sscanf(buf, "%d", &session->max_r2t);
2321 break;
2322 case ISCSI_PARAM_IMM_DATA_EN:
2323 sscanf(buf, "%d", &session->imm_data_en);
2324 break;
2325 case ISCSI_PARAM_FIRST_BURST:
2326 sscanf(buf, "%d", &session->first_burst);
2327 break;
2328 case ISCSI_PARAM_MAX_BURST:
2329 sscanf(buf, "%d", &session->max_burst);
2330 break;
2331 case ISCSI_PARAM_PDU_INORDER_EN:
2332 sscanf(buf, "%d", &session->pdu_inorder_en);
2333 break;
2334 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2335 sscanf(buf, "%d", &session->dataseq_inorder_en);
2336 break;
2337 case ISCSI_PARAM_ERL:
2338 sscanf(buf, "%d", &session->erl);
2339 break;
2340 case ISCSI_PARAM_IFMARKER_EN:
2341 sscanf(buf, "%d", &value);
2342 BUG_ON(value);
2343 break;
2344 case ISCSI_PARAM_OFMARKER_EN:
2345 sscanf(buf, "%d", &value);
2346 BUG_ON(value);
2347 break;
2348 case ISCSI_PARAM_EXP_STATSN:
2349 sscanf(buf, "%u", &conn->exp_statsn);
2350 break;
2351 case ISCSI_PARAM_USERNAME:
2352 kfree(session->username);
2353 session->username = kstrdup(buf, GFP_KERNEL);
2354 if (!session->username)
2355 return -ENOMEM;
2356 break;
2357 case ISCSI_PARAM_USERNAME_IN:
2358 kfree(session->username_in);
2359 session->username_in = kstrdup(buf, GFP_KERNEL);
2360 if (!session->username_in)
2361 return -ENOMEM;
2362 break;
2363 case ISCSI_PARAM_PASSWORD:
2364 kfree(session->password);
2365 session->password = kstrdup(buf, GFP_KERNEL);
2366 if (!session->password)
2367 return -ENOMEM;
2368 break;
2369 case ISCSI_PARAM_PASSWORD_IN:
2370 kfree(session->password_in);
2371 session->password_in = kstrdup(buf, GFP_KERNEL);
2372 if (!session->password_in)
2373 return -ENOMEM;
2374 break;
2375 case ISCSI_PARAM_TARGET_NAME:
2376 /* this should not change between logins */
2377 if (session->targetname)
2378 break;
2380 session->targetname = kstrdup(buf, GFP_KERNEL);
2381 if (!session->targetname)
2382 return -ENOMEM;
2383 break;
2384 case ISCSI_PARAM_TPGT:
2385 sscanf(buf, "%d", &session->tpgt);
2386 break;
2387 case ISCSI_PARAM_PERSISTENT_PORT:
2388 sscanf(buf, "%d", &conn->persistent_port);
2389 break;
2390 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2391 /*
2392 * this is the address returned in discovery so it should
2393 * not change between logins.
2394 */
2395 if (conn->persistent_address)
2396 break;
2398 conn->persistent_address = kstrdup(buf, GFP_KERNEL);
2399 if (!conn->persistent_address)
2400 return -ENOMEM;
2401 break;
2402 default:
2403 return -ENOSYS;
2404 }
2406 return 0;
2407 }
2408 EXPORT_SYMBOL_GPL(iscsi_set_param);
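/*
 * Illustrative sketch, not part of the original file: every value reaches
 * iscsi_set_param() as a NUL-terminated string, whether it is numeric
 * (parsed with sscanf above) or text (duplicated with kstrdup). The helper
 * name and the example values are hypothetical; note first_burst is kept
 * no larger than max_burst, which iscsi_conn_start() later checks.
 */
static int example_push_login_params(struct iscsi_cls_conn *cls_conn)
{
	char first_burst[] = "65536";
	char max_burst[] = "262144";
	char tgt[] = "iqn.2004-01.com.example:target0";
	int rc;

	rc = iscsi_set_param(cls_conn, ISCSI_PARAM_FIRST_BURST,
			     first_burst, sizeof(first_burst));
	if (rc)
		return rc;
	rc = iscsi_set_param(cls_conn, ISCSI_PARAM_MAX_BURST,
			     max_burst, sizeof(max_burst));
	if (rc)
		return rc;
	/* string parameters are kstrdup()ed, so -ENOMEM is possible */
	return iscsi_set_param(cls_conn, ISCSI_PARAM_TARGET_NAME,
			       tgt, sizeof(tgt));
}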
2410 int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
2411 enum iscsi_param param, char *buf)
2412 {
2413 struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
2414 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2415 int len;
2417 switch(param) {
2418 case ISCSI_PARAM_FAST_ABORT:
2419 len = sprintf(buf, "%d\n", session->fast_abort);
2420 break;
2421 case ISCSI_PARAM_ABORT_TMO:
2422 len = sprintf(buf, "%d\n", session->abort_timeout);
2423 break;
2424 case ISCSI_PARAM_LU_RESET_TMO:
2425 len = sprintf(buf, "%d\n", session->lu_reset_timeout);
2426 break;
2427 case ISCSI_PARAM_INITIAL_R2T_EN:
2428 len = sprintf(buf, "%d\n", session->initial_r2t_en);
2429 break;
2430 case ISCSI_PARAM_MAX_R2T:
2431 len = sprintf(buf, "%hu\n", session->max_r2t);
2432 break;
2433 case ISCSI_PARAM_IMM_DATA_EN:
2434 len = sprintf(buf, "%d\n", session->imm_data_en);
2435 break;
2436 case ISCSI_PARAM_FIRST_BURST:
2437 len = sprintf(buf, "%u\n", session->first_burst);
2438 break;
2439 case ISCSI_PARAM_MAX_BURST:
2440 len = sprintf(buf, "%u\n", session->max_burst);
2441 break;
2442 case ISCSI_PARAM_PDU_INORDER_EN:
2443 len = sprintf(buf, "%d\n", session->pdu_inorder_en);
2444 break;
2445 case ISCSI_PARAM_DATASEQ_INORDER_EN:
2446 len = sprintf(buf, "%d\n", session->dataseq_inorder_en);
2447 break;
2448 case ISCSI_PARAM_ERL:
2449 len = sprintf(buf, "%d\n", session->erl);
2450 break;
2451 case ISCSI_PARAM_TARGET_NAME:
2452 len = sprintf(buf, "%s\n", session->targetname);
2453 break;
2454 case ISCSI_PARAM_TPGT:
2455 len = sprintf(buf, "%d\n", session->tpgt);
2456 break;
2457 case ISCSI_PARAM_USERNAME:
2458 len = sprintf(buf, "%s\n", session->username);
2459 break;
2460 case ISCSI_PARAM_USERNAME_IN:
2461 len = sprintf(buf, "%s\n", session->username_in);
2462 break;
2463 case ISCSI_PARAM_PASSWORD:
2464 len = sprintf(buf, "%s\n", session->password);
2465 break;
2466 case ISCSI_PARAM_PASSWORD_IN:
2467 len = sprintf(buf, "%s\n", session->password_in);
2468 break;
2469 default:
2470 return -ENOSYS;
2471 }
2473 return len;
2474 }
2475 EXPORT_SYMBOL_GPL(iscsi_session_get_param);
2477 int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
2478 enum iscsi_param param, char *buf)
2479 {
2480 struct iscsi_conn *conn = cls_conn->dd_data;
2481 int len;
2483 switch(param) {
2484 case ISCSI_PARAM_PING_TMO:
2485 len = sprintf(buf, "%u\n", conn->ping_timeout);
2486 break;
2487 case ISCSI_PARAM_RECV_TMO:
2488 len = sprintf(buf, "%u\n", conn->recv_timeout);
2489 break;
2490 case ISCSI_PARAM_MAX_RECV_DLENGTH:
2491 len = sprintf(buf, "%u\n", conn->max_recv_dlength);
2492 break;
2493 case ISCSI_PARAM_MAX_XMIT_DLENGTH:
2494 len = sprintf(buf, "%u\n", conn->max_xmit_dlength);
2495 break;
2496 case ISCSI_PARAM_HDRDGST_EN:
2497 len = sprintf(buf, "%d\n", conn->hdrdgst_en);
2498 break;
2499 case ISCSI_PARAM_DATADGST_EN:
2500 len = sprintf(buf, "%d\n", conn->datadgst_en);
2501 break;
2502 case ISCSI_PARAM_IFMARKER_EN:
2503 len = sprintf(buf, "%d\n", conn->ifmarker_en);
2504 break;
2505 case ISCSI_PARAM_OFMARKER_EN:
2506 len = sprintf(buf, "%d\n", conn->ofmarker_en);
2507 break;
2508 case ISCSI_PARAM_EXP_STATSN:
2509 len = sprintf(buf, "%u\n", conn->exp_statsn);
2510 break;
2511 case ISCSI_PARAM_PERSISTENT_PORT:
2512 len = sprintf(buf, "%d\n", conn->persistent_port);
2513 break;
2514 case ISCSI_PARAM_PERSISTENT_ADDRESS:
2515 len = sprintf(buf, "%s\n", conn->persistent_address);
2516 break;
2517 default:
2518 return -ENOSYS;
2519 }
2521 return len;
2522 }
2523 EXPORT_SYMBOL_GPL(iscsi_conn_get_param);
2525 int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2526 char *buf)
2527 {
2528 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2529 int len;
2531 switch (param) {
2532 case ISCSI_HOST_PARAM_NETDEV_NAME:
2533 if (!session->netdev)
2534 len = sprintf(buf, "%s\n", "default");
2535 else
2536 len = sprintf(buf, "%s\n", session->netdev);
2537 break;
2538 case ISCSI_HOST_PARAM_HWADDRESS:
2539 if (!session->hwaddress)
2540 len = sprintf(buf, "%s\n", "default");
2541 else
2542 len = sprintf(buf, "%s\n", session->hwaddress);
2543 break;
2544 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2545 if (!session->initiatorname)
2546 len = sprintf(buf, "%s\n", "unknown");
2547 else
2548 len = sprintf(buf, "%s\n", session->initiatorname);
2549 break;
2551 default:
2552 return -ENOSYS;
2553 }
2555 return len;
2556 }
2557 EXPORT_SYMBOL_GPL(iscsi_host_get_param);
2559 int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2560 char *buf, int buflen)
2561 {
2562 struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
2564 switch (param) {
2565 case ISCSI_HOST_PARAM_NETDEV_NAME:
2566 if (!session->netdev)
2567 session->netdev = kstrdup(buf, GFP_KERNEL);
2568 break;
2569 case ISCSI_HOST_PARAM_HWADDRESS:
2570 if (!session->hwaddress)
2571 session->hwaddress = kstrdup(buf, GFP_KERNEL);
2572 break;
2573 case ISCSI_HOST_PARAM_INITIATOR_NAME:
2574 if (!session->initiatorname)
2575 session->initiatorname = kstrdup(buf, GFP_KERNEL);
2576 break;
2577 default:
2578 return -ENOSYS;
2579 }
2581 return 0;
2582 }
2583 EXPORT_SYMBOL_GPL(iscsi_host_set_param);
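/*
 * Illustrative sketch, not part of the original file: the host parameters
 * are write-once strings, and iscsi_host_get_param() above substitutes
 * "default" (or "unknown" for the initiator name) while they are unset.
 * The helper name, the address, and the caller-provided buffer are
 * hypothetical.
 */
static void example_host_param_usage(struct Scsi_Host *shost, char *buf)
{
	char hwaddr[] = "00:11:22:33:44:55";

	/* fills buf with "default\n" because nothing has been set yet */
	iscsi_host_get_param(shost, ISCSI_HOST_PARAM_HWADDRESS, buf);

	iscsi_host_set_param(shost, ISCSI_HOST_PARAM_HWADDRESS,
			     hwaddr, sizeof(hwaddr));
	/* now returns the stored address; a second set is silently ignored */
	iscsi_host_get_param(shost, ISCSI_HOST_PARAM_HWADDRESS, buf);
}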
2585 MODULE_AUTHOR("Mike Christie");
2586 MODULE_DESCRIPTION("iSCSI library functions");
2587 MODULE_LICENSE("GPL");