2 * iSCSI Initiator over TCP/IP Data-Path
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie
7 * maintained by open-iscsi@googlegroups.com
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published
11 * by the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * See the file COPYING included with this distribution for more details.
28 #include <linux/types.h>
29 #include <linux/list.h>
30 #include <linux/inet.h>
31 #include <linux/blkdev.h>
32 #include <linux/crypto.h>
33 #include <linux/delay.h>
34 #include <linux/kfifo.h>
35 #include <linux/scatterlist.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_eh.h>
40 #include <scsi/scsi_request.h>
41 #include <scsi/scsi_tcq.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_transport_iscsi.h>
46 #include "iscsi_tcp.h"
48 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>");
50 MODULE_DESCRIPTION("iSCSI/TCP data-path");
51 MODULE_LICENSE("GPL");
52 MODULE_VERSION("0:4.409");
53 /* #define DEBUG_TCP */
54 /* #define DEBUG_SCSI */
58 #define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
60 #define debug_tcp(fmt...)
64 #define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
66 #define debug_scsi(fmt...)
76 #define INVALID_SN_DELTA 0xffff
/* Maximum number of LUNs reported to the SCSI midlayer per target;
 * tunable at module load via the "max_lun" parameter (read-only in sysfs). */
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
/* Slab cache for per-connection task allocations (backs the data-task
 * mempools created elsewhere in this driver). */
static kmem_cache_t *taskcache;
85 iscsi_buf_init_virt(struct iscsi_buf
*ibuf
, char *vbuf
, int size
)
87 sg_init_one(&ibuf
->sg
, (u8
*)vbuf
, size
);
92 iscsi_buf_init_iov(struct iscsi_buf
*ibuf
, char *vbuf
, int size
)
94 ibuf
->sg
.page
= (void*)vbuf
;
95 ibuf
->sg
.offset
= (unsigned int)-1;
96 ibuf
->sg
.length
= size
;
101 iscsi_buf_iov_base(struct iscsi_buf
*ibuf
)
103 return (char*)ibuf
->sg
.page
+ ibuf
->sent
;
107 iscsi_buf_init_sg(struct iscsi_buf
*ibuf
, struct scatterlist
*sg
)
110 * Fastpath: sg element fits into single page
112 if (sg
->length
+ sg
->offset
<= PAGE_SIZE
&& page_count(sg
->page
) >= 2) {
113 ibuf
->sg
.page
= sg
->page
;
114 ibuf
->sg
.offset
= sg
->offset
;
115 ibuf
->sg
.length
= sg
->length
;
117 iscsi_buf_init_iov(ibuf
, page_address(sg
->page
), sg
->length
);
122 iscsi_buf_left(struct iscsi_buf
*ibuf
)
126 rc
= ibuf
->sg
.length
- ibuf
->sent
;
132 iscsi_hdr_digest(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
,
135 crypto_digest_digest(conn
->tx_tfm
, &buf
->sg
, 1, crc
);
136 buf
->sg
.length
+= sizeof(uint32_t);
140 iscsi_conn_failure(struct iscsi_conn
*conn
, enum iscsi_err err
)
142 struct iscsi_session
*session
= conn
->session
;
145 spin_lock_irqsave(&session
->lock
, flags
);
146 if (session
->conn_cnt
== 1 || session
->leadconn
== conn
)
147 session
->state
= ISCSI_STATE_FAILED
;
148 spin_unlock_irqrestore(&session
->lock
, flags
);
149 set_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
150 set_bit(SUSPEND_BIT
, &conn
->suspend_rx
);
151 iscsi_conn_error(iscsi_handle(conn
), err
);
155 iscsi_check_assign_cmdsn(struct iscsi_session
*session
, struct iscsi_nopin
*hdr
)
157 uint32_t max_cmdsn
= be32_to_cpu(hdr
->max_cmdsn
);
158 uint32_t exp_cmdsn
= be32_to_cpu(hdr
->exp_cmdsn
);
160 if (max_cmdsn
< exp_cmdsn
-1 &&
161 max_cmdsn
> exp_cmdsn
- INVALID_SN_DELTA
)
162 return ISCSI_ERR_MAX_CMDSN
;
163 if (max_cmdsn
> session
->max_cmdsn
||
164 max_cmdsn
< session
->max_cmdsn
- INVALID_SN_DELTA
)
165 session
->max_cmdsn
= max_cmdsn
;
166 if (exp_cmdsn
> session
->exp_cmdsn
||
167 exp_cmdsn
< session
->exp_cmdsn
- INVALID_SN_DELTA
)
168 session
->exp_cmdsn
= exp_cmdsn
;
174 iscsi_hdr_extract(struct iscsi_conn
*conn
)
176 struct sk_buff
*skb
= conn
->in
.skb
;
178 if (conn
->in
.copy
>= conn
->hdr_size
&&
179 conn
->in_progress
== IN_PROGRESS_WAIT_HEADER
) {
181 * Zero-copy PDU Header: using connection context
182 * to store header pointer.
184 if (skb_shinfo(skb
)->frag_list
== NULL
&&
185 !skb_shinfo(skb
)->nr_frags
)
186 conn
->in
.hdr
= (struct iscsi_hdr
*)
187 ((char*)skb
->data
+ conn
->in
.offset
);
189 /* ignoring return code since we checked
191 skb_copy_bits(skb
, conn
->in
.offset
,
192 &conn
->hdr
, conn
->hdr_size
);
193 conn
->in
.hdr
= &conn
->hdr
;
195 conn
->in
.offset
+= conn
->hdr_size
;
196 conn
->in
.copy
-= conn
->hdr_size
;
202 * PDU header scattered across SKB's,
203 * copying it... This'll happen quite rarely.
206 if (conn
->in_progress
== IN_PROGRESS_WAIT_HEADER
)
207 conn
->in
.hdr_offset
= 0;
209 hdr_remains
= conn
->hdr_size
- conn
->in
.hdr_offset
;
210 BUG_ON(hdr_remains
<= 0);
212 copylen
= min(conn
->in
.copy
, hdr_remains
);
213 skb_copy_bits(skb
, conn
->in
.offset
,
214 (char*)&conn
->hdr
+ conn
->in
.hdr_offset
, copylen
);
216 debug_tcp("PDU gather offset %d bytes %d in.offset %d "
217 "in.copy %d\n", conn
->in
.hdr_offset
, copylen
,
218 conn
->in
.offset
, conn
->in
.copy
);
220 conn
->in
.offset
+= copylen
;
221 conn
->in
.copy
-= copylen
;
222 if (copylen
< hdr_remains
) {
223 conn
->in_progress
= IN_PROGRESS_HEADER_GATHER
;
224 conn
->in
.hdr_offset
+= copylen
;
227 conn
->in
.hdr
= &conn
->hdr
;
228 conn
->discontiguous_hdr_cnt
++;
229 conn
->in_progress
= IN_PROGRESS_WAIT_HEADER
;
236 iscsi_ctask_cleanup(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
238 struct scsi_cmnd
*sc
= ctask
->sc
;
239 struct iscsi_session
*session
= conn
->session
;
241 spin_lock(&session
->lock
);
243 spin_unlock(&session
->lock
);
246 if (sc
->sc_data_direction
== DMA_TO_DEVICE
) {
247 struct iscsi_data_task
*dtask
, *n
;
248 /* WRITE: cleanup Data-Out's if any */
249 spin_lock(&conn
->lock
);
250 list_for_each_entry_safe(dtask
, n
, &ctask
->dataqueue
, item
) {
251 list_del(&dtask
->item
);
252 mempool_free(dtask
, ctask
->datapool
);
254 spin_unlock(&conn
->lock
);
256 ctask
->xmstate
= XMSTATE_IDLE
;
259 __kfifo_put(session
->cmdpool
.queue
, (void*)&ctask
, sizeof(void*));
260 spin_unlock(&session
->lock
);
264 * iscsi_cmd_rsp - SCSI Command Response processing
265 * @conn: iscsi connection
266 * @ctask: scsi command task
269 iscsi_cmd_rsp(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
272 struct iscsi_cmd_rsp
*rhdr
= (struct iscsi_cmd_rsp
*)conn
->in
.hdr
;
273 struct iscsi_session
*session
= conn
->session
;
274 struct scsi_cmnd
*sc
= ctask
->sc
;
276 rc
= iscsi_check_assign_cmdsn(session
, (struct iscsi_nopin
*)rhdr
);
278 sc
->result
= (DID_ERROR
<< 16);
282 conn
->exp_statsn
= be32_to_cpu(rhdr
->statsn
) + 1;
284 sc
->result
= (DID_OK
<< 16) | rhdr
->cmd_status
;
286 if (rhdr
->response
!= ISCSI_STATUS_CMD_COMPLETED
) {
287 sc
->result
= (DID_ERROR
<< 16);
291 if (rhdr
->cmd_status
== SAM_STAT_CHECK_CONDITION
&& conn
->senselen
) {
292 int sensecopy
= min(conn
->senselen
, SCSI_SENSE_BUFFERSIZE
);
294 memcpy(sc
->sense_buffer
, conn
->data
+ 2, sensecopy
);
295 debug_scsi("copied %d bytes of sense\n", sensecopy
);
298 if (sc
->sc_data_direction
== DMA_TO_DEVICE
)
301 if (rhdr
->flags
& ISCSI_FLAG_CMD_UNDERFLOW
) {
302 int res_count
= be32_to_cpu(rhdr
->residual_count
);
304 if (res_count
> 0 && res_count
<= sc
->request_bufflen
)
305 sc
->resid
= res_count
;
307 sc
->result
= (DID_BAD_TARGET
<< 16) | rhdr
->cmd_status
;
308 } else if (rhdr
->flags
& ISCSI_FLAG_CMD_BIDI_UNDERFLOW
)
309 sc
->result
= (DID_BAD_TARGET
<< 16) | rhdr
->cmd_status
;
310 else if (rhdr
->flags
& ISCSI_FLAG_CMD_OVERFLOW
)
311 sc
->resid
= be32_to_cpu(rhdr
->residual_count
);
314 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
315 (long)sc
, sc
->result
, ctask
->itt
);
316 conn
->scsirsp_pdus_cnt
++;
317 iscsi_ctask_cleanup(conn
, ctask
);
323 * iscsi_data_rsp - SCSI Data-In Response processing
324 * @conn: iscsi connection
325 * @ctask: scsi command task
328 iscsi_data_rsp(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
331 struct iscsi_data_rsp
*rhdr
= (struct iscsi_data_rsp
*)conn
->in
.hdr
;
332 struct iscsi_session
*session
= conn
->session
;
333 int datasn
= be32_to_cpu(rhdr
->datasn
);
335 rc
= iscsi_check_assign_cmdsn(session
, (struct iscsi_nopin
*)rhdr
);
339 * setup Data-In byte counter (gets decremented..)
341 ctask
->data_count
= conn
->in
.datalen
;
343 if (conn
->in
.datalen
== 0)
346 if (ctask
->datasn
!= datasn
)
347 return ISCSI_ERR_DATASN
;
351 ctask
->data_offset
= be32_to_cpu(rhdr
->offset
);
352 if (ctask
->data_offset
+ conn
->in
.datalen
> ctask
->total_length
)
353 return ISCSI_ERR_DATA_OFFSET
;
355 if (rhdr
->flags
& ISCSI_FLAG_DATA_STATUS
) {
356 struct scsi_cmnd
*sc
= ctask
->sc
;
358 conn
->exp_statsn
= be32_to_cpu(rhdr
->statsn
) + 1;
359 if (rhdr
->flags
& ISCSI_FLAG_CMD_UNDERFLOW
) {
360 int res_count
= be32_to_cpu(rhdr
->residual_count
);
363 res_count
<= sc
->request_bufflen
) {
364 sc
->resid
= res_count
;
365 sc
->result
= (DID_OK
<< 16) | rhdr
->cmd_status
;
367 sc
->result
= (DID_BAD_TARGET
<< 16) |
369 } else if (rhdr
->flags
& ISCSI_FLAG_CMD_BIDI_UNDERFLOW
)
370 sc
->result
= (DID_BAD_TARGET
<< 16) | rhdr
->cmd_status
;
371 else if (rhdr
->flags
& ISCSI_FLAG_CMD_OVERFLOW
) {
372 sc
->resid
= be32_to_cpu(rhdr
->residual_count
);
373 sc
->result
= (DID_OK
<< 16) | rhdr
->cmd_status
;
375 sc
->result
= (DID_OK
<< 16) | rhdr
->cmd_status
;
378 conn
->datain_pdus_cnt
++;
383 * iscsi_solicit_data_init - initialize first Data-Out
384 * @conn: iscsi connection
385 * @ctask: scsi command task
389 * Initialize first Data-Out within this R2T sequence and finds
390 * proper data_offset within this SCSI command.
392 * This function is called with connection lock taken.
395 iscsi_solicit_data_init(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
396 struct iscsi_r2t_info
*r2t
)
398 struct iscsi_data
*hdr
;
399 struct iscsi_data_task
*dtask
;
400 struct scsi_cmnd
*sc
= ctask
->sc
;
402 dtask
= mempool_alloc(ctask
->datapool
, GFP_ATOMIC
);
405 memset(hdr
, 0, sizeof(struct iscsi_data
));
407 hdr
->datasn
= cpu_to_be32(r2t
->solicit_datasn
);
408 r2t
->solicit_datasn
++;
409 hdr
->opcode
= ISCSI_OP_SCSI_DATA_OUT
;
410 memcpy(hdr
->lun
, ctask
->hdr
.lun
, sizeof(hdr
->lun
));
411 hdr
->itt
= ctask
->hdr
.itt
;
412 hdr
->exp_statsn
= r2t
->exp_statsn
;
413 hdr
->offset
= cpu_to_be32(r2t
->data_offset
);
414 if (r2t
->data_length
> conn
->max_xmit_dlength
) {
415 hton24(hdr
->dlength
, conn
->max_xmit_dlength
);
416 r2t
->data_count
= conn
->max_xmit_dlength
;
419 hton24(hdr
->dlength
, r2t
->data_length
);
420 r2t
->data_count
= r2t
->data_length
;
421 hdr
->flags
= ISCSI_FLAG_CMD_FINAL
;
423 conn
->dataout_pdus_cnt
++;
427 iscsi_buf_init_virt(&r2t
->headbuf
, (char*)hdr
,
428 sizeof(struct iscsi_hdr
));
434 struct scatterlist
*sg
= sc
->request_buffer
;
437 for (i
= 0; i
< sc
->use_sg
; i
++, sg
+= 1) {
438 /* FIXME: prefetch ? */
439 if (sg_count
+ sg
->length
> r2t
->data_offset
) {
444 /* offset within this page */
445 page_offset
= r2t
->data_offset
- sg_count
;
447 /* fill in this buffer */
448 iscsi_buf_init_sg(&r2t
->sendbuf
, sg
);
449 r2t
->sendbuf
.sg
.offset
+= page_offset
;
450 r2t
->sendbuf
.sg
.length
-= page_offset
;
452 /* xmit logic will continue with next one */
456 sg_count
+= sg
->length
;
458 BUG_ON(r2t
->sg
== NULL
);
460 iscsi_buf_init_iov(&ctask
->sendbuf
,
461 (char*)sc
->request_buffer
+ r2t
->data_offset
,
464 list_add(&dtask
->item
, &ctask
->dataqueue
);
468 * iscsi_r2t_rsp - iSCSI R2T Response processing
469 * @conn: iscsi connection
470 * @ctask: scsi command task
473 iscsi_r2t_rsp(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
475 struct iscsi_r2t_info
*r2t
;
476 struct iscsi_session
*session
= conn
->session
;
477 struct iscsi_r2t_rsp
*rhdr
= (struct iscsi_r2t_rsp
*)conn
->in
.hdr
;
478 int r2tsn
= be32_to_cpu(rhdr
->r2tsn
);
482 return ISCSI_ERR_AHSLEN
;
484 if (conn
->in
.datalen
)
485 return ISCSI_ERR_DATALEN
;
487 if (ctask
->exp_r2tsn
&& ctask
->exp_r2tsn
!= r2tsn
)
488 return ISCSI_ERR_R2TSN
;
490 rc
= iscsi_check_assign_cmdsn(session
, (struct iscsi_nopin
*)rhdr
);
494 /* FIXME: use R2TSN to detect missing R2T */
496 /* fill-in new R2T associated with the task */
497 spin_lock(&session
->lock
);
498 if (!ctask
->sc
|| ctask
->mtask
||
499 session
->state
!= ISCSI_STATE_LOGGED_IN
) {
500 printk(KERN_INFO
"iscsi_tcp: dropping R2T itt %d in "
501 "recovery...\n", ctask
->itt
);
502 spin_unlock(&session
->lock
);
505 rc
= __kfifo_get(ctask
->r2tpool
.queue
, (void*)&r2t
, sizeof(void*));
508 r2t
->exp_statsn
= rhdr
->statsn
;
509 r2t
->data_length
= be32_to_cpu(rhdr
->data_length
);
510 if (r2t
->data_length
== 0 ||
511 r2t
->data_length
> session
->max_burst
) {
512 spin_unlock(&session
->lock
);
513 return ISCSI_ERR_DATALEN
;
516 r2t
->data_offset
= be32_to_cpu(rhdr
->data_offset
);
517 if (r2t
->data_offset
+ r2t
->data_length
> ctask
->total_length
) {
518 spin_unlock(&session
->lock
);
519 return ISCSI_ERR_DATALEN
;
522 r2t
->ttt
= rhdr
->ttt
; /* no flip */
523 r2t
->solicit_datasn
= 0;
525 iscsi_solicit_data_init(conn
, ctask
, r2t
);
527 ctask
->exp_r2tsn
= r2tsn
+ 1;
528 ctask
->xmstate
|= XMSTATE_SOL_HDR
;
529 __kfifo_put(ctask
->r2tqueue
, (void*)&r2t
, sizeof(void*));
530 __kfifo_put(conn
->writequeue
, (void*)&ctask
, sizeof(void*));
532 schedule_work(&conn
->xmitwork
);
533 conn
->r2t_pdus_cnt
++;
534 spin_unlock(&session
->lock
);
540 iscsi_hdr_recv(struct iscsi_conn
*conn
)
543 struct iscsi_hdr
*hdr
;
544 struct iscsi_cmd_task
*ctask
;
545 struct iscsi_session
*session
= conn
->session
;
546 uint32_t cdgst
, rdgst
= 0;
550 /* verify PDU length */
551 conn
->in
.datalen
= ntoh24(hdr
->dlength
);
552 if (conn
->in
.datalen
> conn
->max_recv_dlength
) {
553 printk(KERN_ERR
"iscsi_tcp: datalen %d > %d\n",
554 conn
->in
.datalen
, conn
->max_recv_dlength
);
555 return ISCSI_ERR_DATALEN
;
557 conn
->data_copied
= 0;
560 conn
->in
.ahslen
= hdr
->hlength
* 4;
561 conn
->in
.offset
+= conn
->in
.ahslen
;
562 conn
->in
.copy
-= conn
->in
.ahslen
;
563 if (conn
->in
.copy
< 0) {
564 printk(KERN_ERR
"iscsi_tcp: can't handle AHS with length "
565 "%d bytes\n", conn
->in
.ahslen
);
566 return ISCSI_ERR_AHSLEN
;
569 /* calculate read padding */
570 conn
->in
.padding
= conn
->in
.datalen
& (ISCSI_PAD_LEN
-1);
571 if (conn
->in
.padding
) {
572 conn
->in
.padding
= ISCSI_PAD_LEN
- conn
->in
.padding
;
573 debug_scsi("read padding %d bytes\n", conn
->in
.padding
);
576 if (conn
->hdrdgst_en
) {
577 struct scatterlist sg
;
579 sg_init_one(&sg
, (u8
*)hdr
,
580 sizeof(struct iscsi_hdr
) + conn
->in
.ahslen
);
581 crypto_digest_digest(conn
->rx_tfm
, &sg
, 1, (u8
*)&cdgst
);
582 rdgst
= *(uint32_t*)((char*)hdr
+ sizeof(struct iscsi_hdr
) +
586 /* save opcode for later */
587 conn
->in
.opcode
= hdr
->opcode
& ISCSI_OPCODE_MASK
;
589 /* verify itt (itt encoding: age+cid+itt) */
590 if (hdr
->itt
!= cpu_to_be32(ISCSI_RESERVED_TAG
)) {
591 if ((hdr
->itt
& AGE_MASK
) !=
592 (session
->age
<< AGE_SHIFT
)) {
593 printk(KERN_ERR
"iscsi_tcp: received itt %x expected "
594 "session age (%x)\n", hdr
->itt
,
595 session
->age
& AGE_MASK
);
596 return ISCSI_ERR_BAD_ITT
;
599 if ((hdr
->itt
& CID_MASK
) != (conn
->id
<< CID_SHIFT
)) {
600 printk(KERN_ERR
"iscsi_tcp: received itt %x, expected "
601 "CID (%x)\n", hdr
->itt
, conn
->id
);
602 return ISCSI_ERR_BAD_ITT
;
604 conn
->in
.itt
= hdr
->itt
& ITT_MASK
;
606 conn
->in
.itt
= hdr
->itt
;
608 debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
609 hdr
->opcode
, conn
->in
.offset
, conn
->in
.copy
,
610 conn
->in
.ahslen
, conn
->in
.datalen
);
612 if (conn
->in
.itt
< session
->cmds_max
) {
613 if (conn
->hdrdgst_en
&& cdgst
!= rdgst
) {
614 printk(KERN_ERR
"iscsi_tcp: itt %x: hdrdgst error "
615 "recv 0x%x calc 0x%x\n", conn
->in
.itt
, rdgst
,
617 return ISCSI_ERR_HDR_DGST
;
620 ctask
= (struct iscsi_cmd_task
*)session
->cmds
[conn
->in
.itt
];
623 printk(KERN_INFO
"iscsi_tcp: dropping ctask with "
624 "itt 0x%x\n", ctask
->itt
);
625 conn
->in
.datalen
= 0; /* force drop */
629 if (ctask
->sc
->SCp
.phase
!= session
->age
) {
630 printk(KERN_ERR
"iscsi_tcp: ctask's session age %d, "
631 "expected %d\n", ctask
->sc
->SCp
.phase
,
633 return ISCSI_ERR_SESSION_FAILED
;
636 conn
->in
.ctask
= ctask
;
638 debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
639 hdr
->opcode
, conn
->id
, (long)ctask
->sc
,
640 ctask
->itt
, conn
->in
.datalen
);
642 switch(conn
->in
.opcode
) {
643 case ISCSI_OP_SCSI_CMD_RSP
:
644 BUG_ON((void*)ctask
!= ctask
->sc
->SCp
.ptr
);
645 if (!conn
->in
.datalen
)
646 rc
= iscsi_cmd_rsp(conn
, ctask
);
649 * got sense or response data; copying PDU
650 * Header to the connection's header
653 memcpy(&conn
->hdr
, hdr
,
654 sizeof(struct iscsi_hdr
));
656 case ISCSI_OP_SCSI_DATA_IN
:
657 BUG_ON((void*)ctask
!= ctask
->sc
->SCp
.ptr
);
658 /* save flags for non-exceptional status */
659 conn
->in
.flags
= hdr
->flags
;
660 /* save cmd_status for sense data */
661 conn
->in
.cmd_status
=
662 ((struct iscsi_data_rsp
*)hdr
)->cmd_status
;
663 rc
= iscsi_data_rsp(conn
, ctask
);
666 BUG_ON((void*)ctask
!= ctask
->sc
->SCp
.ptr
);
667 if (ctask
->sc
->sc_data_direction
== DMA_TO_DEVICE
)
668 rc
= iscsi_r2t_rsp(conn
, ctask
);
670 rc
= ISCSI_ERR_PROTO
;
673 rc
= ISCSI_ERR_BAD_OPCODE
;
676 } else if (conn
->in
.itt
>= ISCSI_MGMT_ITT_OFFSET
&&
677 conn
->in
.itt
< ISCSI_MGMT_ITT_OFFSET
+
678 session
->mgmtpool_max
) {
679 struct iscsi_mgmt_task
*mtask
= (struct iscsi_mgmt_task
*)
680 session
->mgmt_cmds
[conn
->in
.itt
-
681 ISCSI_MGMT_ITT_OFFSET
];
683 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
684 conn
->in
.opcode
, conn
->id
, mtask
->itt
,
687 switch(conn
->in
.opcode
) {
688 case ISCSI_OP_LOGIN_RSP
:
689 case ISCSI_OP_TEXT_RSP
:
690 case ISCSI_OP_LOGOUT_RSP
:
691 rc
= iscsi_check_assign_cmdsn(session
,
692 (struct iscsi_nopin
*)hdr
);
696 if (!conn
->in
.datalen
) {
697 rc
= iscsi_recv_pdu(iscsi_handle(conn
), hdr
,
699 if (conn
->login_mtask
!= mtask
) {
700 spin_lock(&session
->lock
);
701 __kfifo_put(session
->mgmtpool
.queue
,
702 (void*)&mtask
, sizeof(void*));
703 spin_unlock(&session
->lock
);
707 case ISCSI_OP_SCSI_TMFUNC_RSP
:
708 rc
= iscsi_check_assign_cmdsn(session
,
709 (struct iscsi_nopin
*)hdr
);
713 if (conn
->in
.datalen
|| conn
->in
.ahslen
) {
714 rc
= ISCSI_ERR_PROTO
;
717 conn
->tmfrsp_pdus_cnt
++;
718 spin_lock(&session
->lock
);
719 if (conn
->tmabort_state
== TMABORT_INITIAL
) {
720 __kfifo_put(session
->mgmtpool
.queue
,
721 (void*)&mtask
, sizeof(void*));
722 conn
->tmabort_state
=
723 ((struct iscsi_tm_rsp
*)hdr
)->
724 response
== ISCSI_TMF_RSP_COMPLETE
?
725 TMABORT_SUCCESS
:TMABORT_FAILED
;
726 /* unblock eh_abort() */
727 wake_up(&conn
->ehwait
);
729 spin_unlock(&session
->lock
);
731 case ISCSI_OP_NOOP_IN
:
732 if (hdr
->ttt
!= ISCSI_RESERVED_TAG
) {
733 rc
= ISCSI_ERR_PROTO
;
736 rc
= iscsi_check_assign_cmdsn(session
,
737 (struct iscsi_nopin
*)hdr
);
740 conn
->exp_statsn
= be32_to_cpu(hdr
->statsn
) + 1;
742 if (!conn
->in
.datalen
) {
743 struct iscsi_mgmt_task
*mtask
;
745 rc
= iscsi_recv_pdu(iscsi_handle(conn
), hdr
,
747 mtask
= (struct iscsi_mgmt_task
*)
748 session
->mgmt_cmds
[conn
->in
.itt
-
749 ISCSI_MGMT_ITT_OFFSET
];
750 if (conn
->login_mtask
!= mtask
) {
751 spin_lock(&session
->lock
);
752 __kfifo_put(session
->mgmtpool
.queue
,
753 (void*)&mtask
, sizeof(void*));
754 spin_unlock(&session
->lock
);
759 rc
= ISCSI_ERR_BAD_OPCODE
;
762 } else if (conn
->in
.itt
== ISCSI_RESERVED_TAG
) {
763 switch(conn
->in
.opcode
) {
764 case ISCSI_OP_NOOP_IN
:
765 if (!conn
->in
.datalen
) {
766 rc
= iscsi_check_assign_cmdsn(session
,
767 (struct iscsi_nopin
*)hdr
);
768 if (!rc
&& hdr
->ttt
!= ISCSI_RESERVED_TAG
)
769 rc
= iscsi_recv_pdu(iscsi_handle(conn
),
772 rc
= ISCSI_ERR_PROTO
;
774 case ISCSI_OP_REJECT
:
775 /* we need sth like iscsi_reject_rsp()*/
776 case ISCSI_OP_ASYNC_EVENT
:
777 /* we need sth like iscsi_async_event_rsp() */
778 rc
= ISCSI_ERR_BAD_OPCODE
;
781 rc
= ISCSI_ERR_BAD_OPCODE
;
785 rc
= ISCSI_ERR_BAD_ITT
;
791 * iscsi_ctask_copy - copy skb bits to the destanation cmd task
792 * @conn: iscsi connection
793 * @ctask: scsi command task
794 * @buf: buffer to copy to
795 * @buf_size: size of buffer
796 * @offset: offset within the buffer
799 * The function calls skb_copy_bits() and updates per-connection and
800 * per-cmd byte counters.
802 * Read counters (in bytes):
804 * conn->in.offset offset within in progress SKB
805 * conn->in.copy left to copy from in progress SKB
807 * conn->in.copied copied already from in progress SKB
808 * conn->data_copied copied already from in progress buffer
809 * ctask->sent total bytes sent up to the MidLayer
810 * ctask->data_count left to copy from in progress Data-In
811 * buf_left left to copy from in progress buffer
814 iscsi_ctask_copy(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
815 void *buf
, int buf_size
, int offset
)
817 int buf_left
= buf_size
- (conn
->data_copied
+ offset
);
818 int size
= min(conn
->in
.copy
, buf_left
);
821 size
= min(size
, ctask
->data_count
);
823 debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
824 size
, conn
->in
.offset
, conn
->in
.copied
);
827 BUG_ON(ctask
->sent
+ size
> ctask
->total_length
);
829 rc
= skb_copy_bits(conn
->in
.skb
, conn
->in
.offset
,
830 (char*)buf
+ (offset
+ conn
->data_copied
), size
);
831 /* must fit into skb->len */
834 conn
->in
.offset
+= size
;
835 conn
->in
.copy
-= size
;
836 conn
->in
.copied
+= size
;
837 conn
->data_copied
+= size
;
839 ctask
->data_count
-= size
;
841 BUG_ON(conn
->in
.copy
< 0);
842 BUG_ON(ctask
->data_count
< 0);
844 if (buf_size
!= (conn
->data_copied
+ offset
)) {
845 if (!ctask
->data_count
) {
846 BUG_ON(buf_size
- conn
->data_copied
< 0);
847 /* done with this PDU */
848 return buf_size
- conn
->data_copied
;
853 /* done with this buffer or with both - PDU and buffer */
854 conn
->data_copied
= 0;
859 * iscsi_tcp_copy - copy skb bits to the destanation buffer
860 * @conn: iscsi connection
861 * @buf: buffer to copy to
862 * @buf_size: number of bytes to copy
865 * The function calls skb_copy_bits() and updates per-connection
869 iscsi_tcp_copy(struct iscsi_conn
*conn
, void *buf
, int buf_size
)
871 int buf_left
= buf_size
- conn
->data_copied
;
872 int size
= min(conn
->in
.copy
, buf_left
);
875 debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
876 size
, conn
->in
.offset
, conn
->data_copied
);
879 rc
= skb_copy_bits(conn
->in
.skb
, conn
->in
.offset
,
880 (char*)buf
+ conn
->data_copied
, size
);
883 conn
->in
.offset
+= size
;
884 conn
->in
.copy
-= size
;
885 conn
->in
.copied
+= size
;
886 conn
->data_copied
+= size
;
888 if (buf_size
!= conn
->data_copied
)
895 partial_sg_digest_update(struct iscsi_conn
*conn
, struct scatterlist
*sg
,
896 int offset
, int length
)
898 struct scatterlist temp
;
900 memcpy(&temp
, sg
, sizeof(struct scatterlist
));
901 temp
.offset
= offset
;
902 temp
.length
= length
;
903 crypto_digest_update(conn
->data_rx_tfm
, &temp
, 1);
907 iscsi_recv_digest_update(struct iscsi_conn
*conn
, char* buf
, int len
)
909 struct scatterlist tmp
;
911 sg_init_one(&tmp
, buf
, len
);
912 crypto_digest_update(conn
->data_rx_tfm
, &tmp
, 1);
915 static int iscsi_scsi_data_in(struct iscsi_conn
*conn
)
917 struct iscsi_cmd_task
*ctask
= conn
->in
.ctask
;
918 struct scsi_cmnd
*sc
= ctask
->sc
;
919 struct scatterlist
*sg
;
920 int i
, offset
, rc
= 0;
922 BUG_ON((void*)ctask
!= sc
->SCp
.ptr
);
925 * copying Data-In into the Scsi_Cmnd
928 i
= ctask
->data_count
;
929 rc
= iscsi_ctask_copy(conn
, ctask
, sc
->request_buffer
,
930 sc
->request_bufflen
, ctask
->data_offset
);
933 if (conn
->datadgst_en
)
934 iscsi_recv_digest_update(conn
, sc
->request_buffer
, i
);
939 offset
= ctask
->data_offset
;
940 sg
= sc
->request_buffer
;
942 if (ctask
->data_offset
)
943 for (i
= 0; i
< ctask
->sg_count
; i
++)
944 offset
-= sg
[i
].length
;
945 /* we've passed through partial sg*/
949 for (i
= ctask
->sg_count
; i
< sc
->use_sg
; i
++) {
952 dest
= kmap_atomic(sg
[i
].page
, KM_SOFTIRQ0
);
953 rc
= iscsi_ctask_copy(conn
, ctask
, dest
+ sg
[i
].offset
,
954 sg
[i
].length
, offset
);
955 kunmap_atomic(dest
, KM_SOFTIRQ0
);
957 /* continue with the next SKB/PDU */
960 if (conn
->datadgst_en
) {
962 crypto_digest_update(conn
->data_rx_tfm
,
965 partial_sg_digest_update(conn
, &sg
[i
],
966 sg
[i
].offset
+ offset
,
967 sg
[i
].length
- offset
);
973 if (!ctask
->data_count
) {
974 if (rc
&& conn
->datadgst_en
)
976 * data-in is complete, but buffer not...
978 partial_sg_digest_update(conn
, &sg
[i
],
979 sg
[i
].offset
, sg
[i
].length
-rc
);
987 BUG_ON(ctask
->data_count
);
990 /* check for non-exceptional status */
991 if (conn
->in
.flags
& ISCSI_FLAG_DATA_STATUS
) {
992 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
993 (long)sc
, sc
->result
, ctask
->itt
);
994 conn
->scsirsp_pdus_cnt
++;
995 iscsi_ctask_cleanup(conn
, ctask
);
1003 iscsi_data_recv(struct iscsi_conn
*conn
)
1005 struct iscsi_session
*session
= conn
->session
;
1008 switch(conn
->in
.opcode
) {
1009 case ISCSI_OP_SCSI_DATA_IN
:
1010 rc
= iscsi_scsi_data_in(conn
);
1012 case ISCSI_OP_SCSI_CMD_RSP
: {
1015 * copying the entire Data Segment.
1017 if (iscsi_tcp_copy(conn
, conn
->data
, conn
->in
.datalen
)) {
1025 conn
->in
.hdr
= &conn
->hdr
;
1026 conn
->senselen
= (conn
->data
[0] << 8) | conn
->data
[1];
1027 rc
= iscsi_cmd_rsp(conn
, conn
->in
.ctask
);
1028 if (!rc
&& conn
->datadgst_en
)
1029 iscsi_recv_digest_update(conn
, conn
->data
,
1033 case ISCSI_OP_TEXT_RSP
:
1034 case ISCSI_OP_LOGIN_RSP
:
1035 case ISCSI_OP_NOOP_IN
: {
1036 struct iscsi_mgmt_task
*mtask
= NULL
;
1038 if (conn
->in
.itt
!= ISCSI_RESERVED_TAG
)
1039 mtask
= (struct iscsi_mgmt_task
*)
1040 session
->mgmt_cmds
[conn
->in
.itt
-
1041 ISCSI_MGMT_ITT_OFFSET
];
1044 * Collect data segment to the connection's data
1047 if (iscsi_tcp_copy(conn
, conn
->data
, conn
->in
.datalen
)) {
1052 rc
= iscsi_recv_pdu(iscsi_handle(conn
), conn
->in
.hdr
,
1053 conn
->data
, conn
->in
.datalen
);
1055 if (!rc
&& conn
->datadgst_en
&&
1056 conn
->in
.opcode
!= ISCSI_OP_LOGIN_RSP
)
1057 iscsi_recv_digest_update(conn
, conn
->data
,
1060 if (mtask
&& conn
->login_mtask
!= mtask
) {
1061 spin_lock(&session
->lock
);
1062 __kfifo_put(session
->mgmtpool
.queue
, (void*)&mtask
,
1064 spin_unlock(&session
->lock
);
1068 case ISCSI_OP_ASYNC_EVENT
:
1069 case ISCSI_OP_REJECT
:
1078 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
1079 * @rd_desc: read descriptor
1080 * @skb: socket buffer
1081 * @offset: offset in skb
1082 * @len: skb->len - offset
1085 iscsi_tcp_data_recv(read_descriptor_t
*rd_desc
, struct sk_buff
*skb
,
1086 unsigned int offset
, size_t len
)
1089 struct iscsi_conn
*conn
= rd_desc
->arg
.data
;
1091 char pad
[ISCSI_PAD_LEN
];
1092 struct scatterlist sg
;
1095 * Save current SKB and its offset in the corresponding
1096 * connection context.
1098 conn
->in
.copy
= skb
->len
- offset
;
1099 conn
->in
.offset
= offset
;
1101 conn
->in
.len
= conn
->in
.copy
;
1102 BUG_ON(conn
->in
.copy
<= 0);
1103 debug_tcp("in %d bytes\n", conn
->in
.copy
);
1106 conn
->in
.copied
= 0;
1109 if (unlikely(conn
->suspend_rx
)) {
1110 debug_tcp("conn %d Rx suspended!\n", conn
->id
);
1114 if (conn
->in_progress
== IN_PROGRESS_WAIT_HEADER
||
1115 conn
->in_progress
== IN_PROGRESS_HEADER_GATHER
) {
1116 rc
= iscsi_hdr_extract(conn
);
1121 iscsi_conn_failure(conn
, rc
);
1127 * Verify and process incoming PDU header.
1129 rc
= iscsi_hdr_recv(conn
);
1130 if (!rc
&& conn
->in
.datalen
) {
1131 if (conn
->datadgst_en
&&
1132 conn
->in
.opcode
!= ISCSI_OP_LOGIN_RSP
) {
1133 BUG_ON(!conn
->data_rx_tfm
);
1134 crypto_digest_init(conn
->data_rx_tfm
);
1136 conn
->in_progress
= IN_PROGRESS_DATA_RECV
;
1138 iscsi_conn_failure(conn
, rc
);
1143 if (conn
->in_progress
== IN_PROGRESS_DDIGEST_RECV
) {
1144 uint32_t recv_digest
;
1145 debug_tcp("extra data_recv offset %d copy %d\n",
1146 conn
->in
.offset
, conn
->in
.copy
);
1147 skb_copy_bits(conn
->in
.skb
, conn
->in
.offset
,
1149 conn
->in
.offset
+= 4;
1151 if (recv_digest
!= conn
->in
.datadgst
) {
1152 debug_tcp("iscsi_tcp: data digest error!"
1153 "0x%x != 0x%x\n", recv_digest
,
1155 iscsi_conn_failure(conn
, ISCSI_ERR_DATA_DGST
);
1158 debug_tcp("iscsi_tcp: data digest match!"
1159 "0x%x == 0x%x\n", recv_digest
,
1161 conn
->in_progress
= IN_PROGRESS_WAIT_HEADER
;
1165 if (conn
->in_progress
== IN_PROGRESS_DATA_RECV
&& conn
->in
.copy
) {
1167 debug_tcp("data_recv offset %d copy %d\n",
1168 conn
->in
.offset
, conn
->in
.copy
);
1170 rc
= iscsi_data_recv(conn
);
1172 if (rc
== -EAGAIN
) {
1173 rd_desc
->count
= conn
->in
.datalen
-
1174 conn
->in
.ctask
->data_count
;
1177 iscsi_conn_failure(conn
, rc
);
1180 conn
->in
.copy
-= conn
->in
.padding
;
1181 conn
->in
.offset
+= conn
->in
.padding
;
1182 if (conn
->datadgst_en
&&
1183 conn
->in
.opcode
!= ISCSI_OP_LOGIN_RSP
) {
1184 if (conn
->in
.padding
) {
1185 debug_tcp("padding -> %d\n", conn
->in
.padding
);
1186 memset(pad
, 0, conn
->in
.padding
);
1187 sg_init_one(&sg
, pad
, conn
->in
.padding
);
1188 crypto_digest_update(conn
->data_rx_tfm
, &sg
, 1);
1190 crypto_digest_final(conn
->data_rx_tfm
,
1191 (u8
*) & conn
->in
.datadgst
);
1192 debug_tcp("rx digest 0x%x\n", conn
->in
.datadgst
);
1193 conn
->in_progress
= IN_PROGRESS_DDIGEST_RECV
;
1195 conn
->in_progress
= IN_PROGRESS_WAIT_HEADER
;
1198 debug_tcp("f, processed %d from out of %d padding %d\n",
1199 conn
->in
.offset
- offset
, (int)len
, conn
->in
.padding
);
1200 BUG_ON(conn
->in
.offset
- offset
> len
);
1202 if (conn
->in
.offset
- offset
!= len
) {
1203 debug_tcp("continue to process %d bytes\n",
1204 (int)len
- (conn
->in
.offset
- offset
));
1209 processed
= conn
->in
.offset
- offset
;
1210 BUG_ON(processed
== 0);
1214 processed
= conn
->in
.offset
- offset
;
1215 debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
1216 processed
, (int)len
, (int)rd_desc
->count
);
1217 BUG_ON(processed
== 0);
1218 BUG_ON(processed
> len
);
1220 conn
->rxdata_octets
+= processed
;
1225 iscsi_tcp_data_ready(struct sock
*sk
, int flag
)
1227 struct iscsi_conn
*conn
= sk
->sk_user_data
;
1228 read_descriptor_t rd_desc
;
1230 read_lock(&sk
->sk_callback_lock
);
1232 /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
1233 rd_desc
.arg
.data
= conn
;
1235 tcp_read_sock(sk
, &rd_desc
, iscsi_tcp_data_recv
);
1237 read_unlock(&sk
->sk_callback_lock
);
1241 iscsi_tcp_state_change(struct sock
*sk
)
1243 struct iscsi_conn
*conn
;
1244 struct iscsi_session
*session
;
1245 void (*old_state_change
)(struct sock
*);
1247 read_lock(&sk
->sk_callback_lock
);
1249 conn
= (struct iscsi_conn
*)sk
->sk_user_data
;
1250 session
= conn
->session
;
1252 if ((sk
->sk_state
== TCP_CLOSE_WAIT
||
1253 sk
->sk_state
== TCP_CLOSE
) &&
1254 !atomic_read(&sk
->sk_rmem_alloc
)) {
1255 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1256 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
1259 old_state_change
= conn
->old_state_change
;
1261 read_unlock(&sk
->sk_callback_lock
);
1263 old_state_change(sk
);
1267 * iscsi_write_space - Called when more output buffer space is available
1268 * @sk: socket space is available for
1271 iscsi_write_space(struct sock
*sk
)
1273 struct iscsi_conn
*conn
= (struct iscsi_conn
*)sk
->sk_user_data
;
1274 conn
->old_write_space(sk
);
1275 debug_tcp("iscsi_write_space: cid %d\n", conn
->id
);
1276 clear_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
1277 schedule_work(&conn
->xmitwork
);
1281 iscsi_conn_set_callbacks(struct iscsi_conn
*conn
)
1283 struct sock
*sk
= conn
->sock
->sk
;
1285 /* assign new callbacks */
1286 write_lock_bh(&sk
->sk_callback_lock
);
1287 sk
->sk_user_data
= conn
;
1288 conn
->old_data_ready
= sk
->sk_data_ready
;
1289 conn
->old_state_change
= sk
->sk_state_change
;
1290 conn
->old_write_space
= sk
->sk_write_space
;
1291 sk
->sk_data_ready
= iscsi_tcp_data_ready
;
1292 sk
->sk_state_change
= iscsi_tcp_state_change
;
1293 sk
->sk_write_space
= iscsi_write_space
;
1294 write_unlock_bh(&sk
->sk_callback_lock
);
1298 iscsi_conn_restore_callbacks(struct iscsi_conn
*conn
)
1300 struct sock
*sk
= conn
->sock
->sk
;
1302 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1303 write_lock_bh(&sk
->sk_callback_lock
);
1304 sk
->sk_user_data
= NULL
;
1305 sk
->sk_data_ready
= conn
->old_data_ready
;
1306 sk
->sk_state_change
= conn
->old_state_change
;
1307 sk
->sk_write_space
= conn
->old_write_space
;
1308 sk
->sk_no_check
= 0;
1309 write_unlock_bh(&sk
->sk_callback_lock
);
1313 * iscsi_send - generic send routine
1314 * @sk: kernel's socket
1315 * @buf: buffer to write from
1316 * @size: actual size to write
1317 * @flags: socket's flags
1320 * depending on buffer will use tcp_sendpage() or tcp_sendmsg().
1321 * buf->sg.offset == -1 tells us that buffer is non S/G and forces
1322 * to use tcp_sendmsg().
1325 iscsi_send(struct socket
*sk
, struct iscsi_buf
*buf
, int size
, int flags
)
1329 if ((int)buf
->sg
.offset
>= 0) {
1330 int offset
= buf
->sg
.offset
+ buf
->sent
;
1333 res
= sk
->ops
->sendpage(sk
, buf
->sg
.page
, offset
, size
, flags
);
1337 buf
->iov
.iov_base
= iscsi_buf_iov_base(buf
);
1338 buf
->iov
.iov_len
= size
;
1340 memset(&msg
, 0, sizeof(struct msghdr
));
1343 res
= kernel_sendmsg(sk
, &msg
, &buf
->iov
, 1, size
);
1350 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
1351 * @conn: iscsi connection
1352 * @buf: buffer to write from
1353 * @datalen: lenght of data to be sent after the header
1359 iscsi_sendhdr(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
, int datalen
)
1361 struct socket
*sk
= conn
->sock
;
1362 int flags
= 0; /* MSG_DONTWAIT; */
1365 size
= buf
->sg
.length
- buf
->sent
;
1366 BUG_ON(buf
->sent
+ size
> buf
->sg
.length
);
1367 if (buf
->sent
+ size
!= buf
->sg
.length
|| datalen
)
1370 res
= iscsi_send(sk
, buf
, size
, flags
);
1371 debug_tcp("sendhdr %d bytes, sent %d res %d\n", size
, buf
->sent
, res
);
1373 conn
->txdata_octets
+= res
;
1378 } else if (res
== -EAGAIN
) {
1379 conn
->sendpage_failures_cnt
++;
1380 set_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
1381 } else if (res
== -EPIPE
)
1382 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
1388 * iscsi_sendpage - send one page of iSCSI Data-Out.
1389 * @conn: iscsi connection
1390 * @buf: buffer to write from
1391 * @count: remaining data
1392 * @sent: number of bytes sent
1398 iscsi_sendpage(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
,
1399 int *count
, int *sent
)
1401 struct socket
*sk
= conn
->sock
;
1402 int flags
= 0; /* MSG_DONTWAIT; */
1405 size
= buf
->sg
.length
- buf
->sent
;
1406 BUG_ON(buf
->sent
+ size
> buf
->sg
.length
);
1409 if (buf
->sent
+ size
!= buf
->sg
.length
|| *count
!= size
)
1412 res
= iscsi_send(sk
, buf
, size
, flags
);
1413 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1414 size
, buf
->sent
, *count
, *sent
, res
);
1416 conn
->txdata_octets
+= res
;
1423 } else if (res
== -EAGAIN
) {
1424 conn
->sendpage_failures_cnt
++;
1425 set_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
1426 } else if (res
== -EPIPE
)
1427 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
1433 iscsi_data_digest_init(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1435 BUG_ON(!conn
->data_tx_tfm
);
1436 crypto_digest_init(conn
->data_tx_tfm
);
1437 ctask
->digest_count
= 4;
1441 iscsi_buf_data_digest_update(struct iscsi_conn
*conn
, struct iscsi_buf
*buf
)
1443 struct scatterlist sg
;
1445 if (buf
->sg
.offset
!= -1)
1446 crypto_digest_update(conn
->data_tx_tfm
, &buf
->sg
, 1);
1448 sg_init_one(&sg
, (char *)buf
->sg
.page
, buf
->sg
.length
);
1449 crypto_digest_update(conn
->data_tx_tfm
, &sg
, 1);
1454 iscsi_digest_final_send(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
1455 struct iscsi_buf
*buf
, uint32_t *digest
, int final
)
1461 crypto_digest_final(conn
->data_tx_tfm
, (u8
*)digest
);
1463 iscsi_buf_init_virt(buf
, (char*)digest
, 4);
1464 rc
= iscsi_sendpage(conn
, buf
, &ctask
->digest_count
, &sent
);
1466 ctask
->datadigest
= *digest
;
1467 ctask
->xmstate
|= XMSTATE_DATA_DIGEST
;
1469 ctask
->digest_count
= 4;
1474 * iscsi_solicit_data_cont - initialize next Data-Out
1475 * @conn: iscsi connection
1476 * @ctask: scsi command task
1478 * @left: bytes left to transfer
1481 * Initialize next Data-Out within this R2T sequence and continue
1482 * to process next Scatter-Gather element(if any) of this SCSI command.
1484 * Called under connection lock.
1487 iscsi_solicit_data_cont(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
1488 struct iscsi_r2t_info
*r2t
, int left
)
1490 struct iscsi_data
*hdr
;
1491 struct iscsi_data_task
*dtask
;
1492 struct scsi_cmnd
*sc
= ctask
->sc
;
1495 dtask
= mempool_alloc(ctask
->datapool
, GFP_ATOMIC
);
1498 memset(hdr
, 0, sizeof(struct iscsi_data
));
1499 hdr
->ttt
= r2t
->ttt
;
1500 hdr
->datasn
= cpu_to_be32(r2t
->solicit_datasn
);
1501 r2t
->solicit_datasn
++;
1502 hdr
->opcode
= ISCSI_OP_SCSI_DATA_OUT
;
1503 memcpy(hdr
->lun
, ctask
->hdr
.lun
, sizeof(hdr
->lun
));
1504 hdr
->itt
= ctask
->hdr
.itt
;
1505 hdr
->exp_statsn
= r2t
->exp_statsn
;
1506 new_offset
= r2t
->data_offset
+ r2t
->sent
;
1507 hdr
->offset
= cpu_to_be32(new_offset
);
1508 if (left
> conn
->max_xmit_dlength
) {
1509 hton24(hdr
->dlength
, conn
->max_xmit_dlength
);
1510 r2t
->data_count
= conn
->max_xmit_dlength
;
1512 hton24(hdr
->dlength
, left
);
1513 r2t
->data_count
= left
;
1514 hdr
->flags
= ISCSI_FLAG_CMD_FINAL
;
1516 conn
->dataout_pdus_cnt
++;
1518 iscsi_buf_init_virt(&r2t
->headbuf
, (char*)hdr
,
1519 sizeof(struct iscsi_hdr
));
1523 if (sc
->use_sg
&& !iscsi_buf_left(&r2t
->sendbuf
)) {
1524 BUG_ON(ctask
->bad_sg
== r2t
->sg
);
1525 iscsi_buf_init_sg(&r2t
->sendbuf
, r2t
->sg
);
1528 iscsi_buf_init_iov(&ctask
->sendbuf
,
1529 (char*)sc
->request_buffer
+ new_offset
,
1532 list_add(&dtask
->item
, &ctask
->dataqueue
);
1536 iscsi_unsolicit_data_init(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1538 struct iscsi_data
*hdr
;
1539 struct iscsi_data_task
*dtask
;
1541 dtask
= mempool_alloc(ctask
->datapool
, GFP_ATOMIC
);
1544 memset(hdr
, 0, sizeof(struct iscsi_data
));
1545 hdr
->ttt
= cpu_to_be32(ISCSI_RESERVED_TAG
);
1546 hdr
->datasn
= cpu_to_be32(ctask
->unsol_datasn
);
1547 ctask
->unsol_datasn
++;
1548 hdr
->opcode
= ISCSI_OP_SCSI_DATA_OUT
;
1549 memcpy(hdr
->lun
, ctask
->hdr
.lun
, sizeof(hdr
->lun
));
1550 hdr
->itt
= ctask
->hdr
.itt
;
1551 hdr
->exp_statsn
= cpu_to_be32(conn
->exp_statsn
);
1552 hdr
->offset
= cpu_to_be32(ctask
->total_length
-
1553 ctask
->r2t_data_count
-
1554 ctask
->unsol_count
);
1555 if (ctask
->unsol_count
> conn
->max_xmit_dlength
) {
1556 hton24(hdr
->dlength
, conn
->max_xmit_dlength
);
1557 ctask
->data_count
= conn
->max_xmit_dlength
;
1560 hton24(hdr
->dlength
, ctask
->unsol_count
);
1561 ctask
->data_count
= ctask
->unsol_count
;
1562 hdr
->flags
= ISCSI_FLAG_CMD_FINAL
;
1565 iscsi_buf_init_virt(&ctask
->headbuf
, (char*)hdr
,
1566 sizeof(struct iscsi_hdr
));
1568 list_add(&dtask
->item
, &ctask
->dataqueue
);
1570 ctask
->dtask
= dtask
;
1574 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1575 * @conn: iscsi connection
1576 * @ctask: scsi command task
1580 iscsi_cmd_init(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
,
1581 struct scsi_cmnd
*sc
)
1583 struct iscsi_session
*session
= conn
->session
;
1585 BUG_ON(__kfifo_len(ctask
->r2tqueue
));
1589 ctask
->hdr
.opcode
= ISCSI_OP_SCSI_CMD
;
1590 ctask
->hdr
.flags
= ISCSI_ATTR_SIMPLE
;
1591 int_to_scsilun(sc
->device
->lun
, (struct scsi_lun
*)ctask
->hdr
.lun
);
1592 ctask
->hdr
.itt
= ctask
->itt
| (conn
->id
<< CID_SHIFT
) |
1593 (session
->age
<< AGE_SHIFT
);
1594 ctask
->hdr
.data_length
= cpu_to_be32(sc
->request_bufflen
);
1595 ctask
->hdr
.cmdsn
= cpu_to_be32(session
->cmdsn
); session
->cmdsn
++;
1596 ctask
->hdr
.exp_statsn
= cpu_to_be32(conn
->exp_statsn
);
1597 memcpy(ctask
->hdr
.cdb
, sc
->cmnd
, sc
->cmd_len
);
1598 memset(&ctask
->hdr
.cdb
[sc
->cmd_len
], 0, MAX_COMMAND_SIZE
- sc
->cmd_len
);
1600 ctask
->mtask
= NULL
;
1602 ctask
->sg_count
= 0;
1604 ctask
->total_length
= sc
->request_bufflen
;
1606 if (sc
->sc_data_direction
== DMA_TO_DEVICE
) {
1607 ctask
->exp_r2tsn
= 0;
1608 ctask
->hdr
.flags
|= ISCSI_FLAG_CMD_WRITE
;
1609 BUG_ON(ctask
->total_length
== 0);
1611 struct scatterlist
*sg
= sc
->request_buffer
;
1613 iscsi_buf_init_sg(&ctask
->sendbuf
,
1614 &sg
[ctask
->sg_count
++]);
1616 ctask
->bad_sg
= sg
+ sc
->use_sg
;
1618 iscsi_buf_init_iov(&ctask
->sendbuf
, sc
->request_buffer
,
1619 sc
->request_bufflen
);
1625 * imm_count bytes to be sent right after
1628 * unsol_count bytes(as Data-Out) to be sent
1629 * without R2T ack right after
1632 * r2t_data_count bytes to be sent via R2T ack's
1634 * pad_count bytes to be sent as zero-padding
1636 ctask
->imm_count
= 0;
1637 ctask
->unsol_count
= 0;
1638 ctask
->unsol_datasn
= 0;
1639 ctask
->xmstate
= XMSTATE_W_HDR
;
1640 /* calculate write padding */
1641 ctask
->pad_count
= ctask
->total_length
& (ISCSI_PAD_LEN
-1);
1642 if (ctask
->pad_count
) {
1643 ctask
->pad_count
= ISCSI_PAD_LEN
- ctask
->pad_count
;
1644 debug_scsi("write padding %d bytes\n",
1646 ctask
->xmstate
|= XMSTATE_W_PAD
;
1648 if (session
->imm_data_en
) {
1649 if (ctask
->total_length
>= session
->first_burst
)
1650 ctask
->imm_count
= min(session
->first_burst
,
1651 conn
->max_xmit_dlength
);
1653 ctask
->imm_count
= min(ctask
->total_length
,
1654 conn
->max_xmit_dlength
);
1655 hton24(ctask
->hdr
.dlength
, ctask
->imm_count
);
1656 ctask
->xmstate
|= XMSTATE_IMM_DATA
;
1658 zero_data(ctask
->hdr
.dlength
);
1660 if (!session
->initial_r2t_en
)
1661 ctask
->unsol_count
= min(session
->first_burst
,
1662 ctask
->total_length
) - ctask
->imm_count
;
1663 if (!ctask
->unsol_count
)
1664 /* No unsolicit Data-Out's */
1665 ctask
->hdr
.flags
|= ISCSI_FLAG_CMD_FINAL
;
1667 ctask
->xmstate
|= XMSTATE_UNS_HDR
| XMSTATE_UNS_INIT
;
1669 ctask
->r2t_data_count
= ctask
->total_length
-
1673 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1675 ctask
->itt
, ctask
->total_length
, ctask
->imm_count
,
1676 ctask
->unsol_count
, ctask
->r2t_data_count
);
1678 ctask
->hdr
.flags
|= ISCSI_FLAG_CMD_FINAL
;
1679 if (sc
->sc_data_direction
== DMA_FROM_DEVICE
)
1680 ctask
->hdr
.flags
|= ISCSI_FLAG_CMD_READ
;
1682 ctask
->xmstate
= XMSTATE_R_HDR
;
1683 zero_data(ctask
->hdr
.dlength
);
1686 iscsi_buf_init_virt(&ctask
->headbuf
, (char*)&ctask
->hdr
,
1687 sizeof(struct iscsi_hdr
));
1688 conn
->scsicmd_pdus_cnt
++;
1692 * iscsi_mtask_xmit - xmit management(immediate) task
1693 * @conn: iscsi connection
1694 * @mtask: task management task
1697 * The function can return -EAGAIN in which case caller must
1698 * call it again later, or recover. '0' return code means successful
1701 * Management xmit state machine consists of two states:
1702 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
1703 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
1706 iscsi_mtask_xmit(struct iscsi_conn
*conn
, struct iscsi_mgmt_task
*mtask
)
1709 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1710 conn
->id
, mtask
->xmstate
, mtask
->itt
);
1712 if (mtask
->xmstate
& XMSTATE_IMM_HDR
) {
1713 mtask
->xmstate
&= ~XMSTATE_IMM_HDR
;
1714 if (mtask
->data_count
)
1715 mtask
->xmstate
|= XMSTATE_IMM_DATA
;
1716 if (conn
->c_stage
!= ISCSI_CONN_INITIAL_STAGE
&&
1717 conn
->stop_stage
!= STOP_CONN_RECOVER
&&
1719 iscsi_hdr_digest(conn
, &mtask
->headbuf
,
1720 (u8
*)mtask
->hdrext
);
1721 if (iscsi_sendhdr(conn
, &mtask
->headbuf
, mtask
->data_count
)) {
1722 mtask
->xmstate
|= XMSTATE_IMM_HDR
;
1723 if (mtask
->data_count
)
1724 mtask
->xmstate
&= ~XMSTATE_IMM_DATA
;
1729 if (mtask
->xmstate
& XMSTATE_IMM_DATA
) {
1730 BUG_ON(!mtask
->data_count
);
1731 mtask
->xmstate
&= ~XMSTATE_IMM_DATA
;
1732 /* FIXME: implement.
1733 * Virtual buffer could be spreaded across multiple pages...
1736 if (iscsi_sendpage(conn
, &mtask
->sendbuf
,
1737 &mtask
->data_count
, &mtask
->sent
)) {
1738 mtask
->xmstate
|= XMSTATE_IMM_DATA
;
1741 } while (mtask
->data_count
);
1744 BUG_ON(mtask
->xmstate
!= XMSTATE_IDLE
);
1749 handle_xmstate_r_hdr(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1751 ctask
->xmstate
&= ~XMSTATE_R_HDR
;
1752 if (conn
->hdrdgst_en
)
1753 iscsi_hdr_digest(conn
, &ctask
->headbuf
, (u8
*)ctask
->hdrext
);
1754 if (!iscsi_sendhdr(conn
, &ctask
->headbuf
, 0)) {
1755 BUG_ON(ctask
->xmstate
!= XMSTATE_IDLE
);
1756 return 0; /* wait for Data-In */
1758 ctask
->xmstate
|= XMSTATE_R_HDR
;
1763 handle_xmstate_w_hdr(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1765 ctask
->xmstate
&= ~XMSTATE_W_HDR
;
1766 if (conn
->hdrdgst_en
)
1767 iscsi_hdr_digest(conn
, &ctask
->headbuf
, (u8
*)ctask
->hdrext
);
1768 if (iscsi_sendhdr(conn
, &ctask
->headbuf
, ctask
->imm_count
)) {
1769 ctask
->xmstate
|= XMSTATE_W_HDR
;
1776 handle_xmstate_data_digest(struct iscsi_conn
*conn
,
1777 struct iscsi_cmd_task
*ctask
)
1779 ctask
->xmstate
&= ~XMSTATE_DATA_DIGEST
;
1780 debug_tcp("resent data digest 0x%x\n", ctask
->datadigest
);
1781 if (iscsi_digest_final_send(conn
, ctask
, &ctask
->immbuf
,
1782 &ctask
->datadigest
, 0)) {
1783 ctask
->xmstate
|= XMSTATE_DATA_DIGEST
;
1784 debug_tcp("resent data digest 0x%x fail!\n",
1792 handle_xmstate_imm_data(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1794 BUG_ON(!ctask
->imm_count
);
1795 ctask
->xmstate
&= ~XMSTATE_IMM_DATA
;
1797 if (conn
->datadgst_en
) {
1798 iscsi_data_digest_init(conn
, ctask
);
1799 ctask
->immdigest
= 0;
1803 if (iscsi_sendpage(conn
, &ctask
->sendbuf
, &ctask
->imm_count
,
1805 ctask
->xmstate
|= XMSTATE_IMM_DATA
;
1806 if (conn
->datadgst_en
) {
1807 crypto_digest_final(conn
->data_tx_tfm
,
1808 (u8
*)&ctask
->immdigest
);
1809 debug_tcp("tx imm sendpage fail 0x%x\n",
1814 if (conn
->datadgst_en
)
1815 iscsi_buf_data_digest_update(conn
, &ctask
->sendbuf
);
1817 if (!ctask
->imm_count
)
1819 iscsi_buf_init_sg(&ctask
->sendbuf
,
1820 &ctask
->sg
[ctask
->sg_count
++]);
1823 if (conn
->datadgst_en
&& !(ctask
->xmstate
& XMSTATE_W_PAD
)) {
1824 if (iscsi_digest_final_send(conn
, ctask
, &ctask
->immbuf
,
1825 &ctask
->immdigest
, 1)) {
1826 debug_tcp("sending imm digest 0x%x fail!\n",
1830 debug_tcp("sending imm digest 0x%x\n", ctask
->immdigest
);
1837 handle_xmstate_uns_hdr(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1839 struct iscsi_data_task
*dtask
;
1841 ctask
->xmstate
|= XMSTATE_UNS_DATA
;
1842 if (ctask
->xmstate
& XMSTATE_UNS_INIT
) {
1843 iscsi_unsolicit_data_init(conn
, ctask
);
1844 BUG_ON(!ctask
->dtask
);
1845 dtask
= ctask
->dtask
;
1846 if (conn
->hdrdgst_en
)
1847 iscsi_hdr_digest(conn
, &ctask
->headbuf
,
1848 (u8
*)dtask
->hdrext
);
1849 ctask
->xmstate
&= ~XMSTATE_UNS_INIT
;
1851 if (iscsi_sendhdr(conn
, &ctask
->headbuf
, ctask
->data_count
)) {
1852 ctask
->xmstate
&= ~XMSTATE_UNS_DATA
;
1853 ctask
->xmstate
|= XMSTATE_UNS_HDR
;
1857 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1858 ctask
->itt
, ctask
->unsol_count
, ctask
->sent
);
1863 handle_xmstate_uns_data(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1865 struct iscsi_data_task
*dtask
= ctask
->dtask
;
1867 BUG_ON(!ctask
->data_count
);
1868 ctask
->xmstate
&= ~XMSTATE_UNS_DATA
;
1870 if (conn
->datadgst_en
) {
1871 iscsi_data_digest_init(conn
, ctask
);
1876 int start
= ctask
->sent
;
1878 if (iscsi_sendpage(conn
, &ctask
->sendbuf
, &ctask
->data_count
,
1880 ctask
->unsol_count
-= ctask
->sent
- start
;
1881 ctask
->xmstate
|= XMSTATE_UNS_DATA
;
1882 /* will continue with this ctask later.. */
1883 if (conn
->datadgst_en
) {
1884 crypto_digest_final(conn
->data_tx_tfm
,
1885 (u8
*)&dtask
->digest
);
1886 debug_tcp("tx uns data fail 0x%x\n",
1892 BUG_ON(ctask
->sent
> ctask
->total_length
);
1893 ctask
->unsol_count
-= ctask
->sent
- start
;
1896 * XXX:we may run here with un-initial sendbuf.
1899 if (conn
->datadgst_en
&& ctask
->sent
- start
> 0)
1900 iscsi_buf_data_digest_update(conn
, &ctask
->sendbuf
);
1902 if (!ctask
->data_count
)
1904 iscsi_buf_init_sg(&ctask
->sendbuf
,
1905 &ctask
->sg
[ctask
->sg_count
++]);
1907 BUG_ON(ctask
->unsol_count
< 0);
1910 * Done with the Data-Out. Next, check if we need
1911 * to send another unsolicited Data-Out.
1913 if (ctask
->unsol_count
) {
1914 if (conn
->datadgst_en
) {
1915 if (iscsi_digest_final_send(conn
, ctask
,
1917 &dtask
->digest
, 1)) {
1918 debug_tcp("send uns digest 0x%x fail\n",
1922 debug_tcp("sending uns digest 0x%x, more uns\n",
1925 ctask
->xmstate
|= XMSTATE_UNS_INIT
;
1929 if (conn
->datadgst_en
&& !(ctask
->xmstate
& XMSTATE_W_PAD
)) {
1930 if (iscsi_digest_final_send(conn
, ctask
,
1932 &dtask
->digest
, 1)) {
1933 debug_tcp("send last uns digest 0x%x fail\n",
1937 debug_tcp("sending uns digest 0x%x\n",dtask
->digest
);
1944 handle_xmstate_sol_data(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
1946 struct iscsi_session
*session
= conn
->session
;
1947 struct iscsi_r2t_info
*r2t
= ctask
->r2t
;
1948 struct iscsi_data_task
*dtask
= r2t
->dtask
;
1951 ctask
->xmstate
&= ~XMSTATE_SOL_DATA
;
1952 ctask
->dtask
= dtask
;
1954 if (conn
->datadgst_en
) {
1955 iscsi_data_digest_init(conn
, ctask
);
1960 * send Data-Out whitnin this R2T sequence.
1962 if (!r2t
->data_count
)
1965 if (iscsi_sendpage(conn
, &r2t
->sendbuf
, &r2t
->data_count
, &r2t
->sent
)) {
1966 ctask
->xmstate
|= XMSTATE_SOL_DATA
;
1967 /* will continue with this ctask later.. */
1968 if (conn
->datadgst_en
) {
1969 crypto_digest_final(conn
->data_tx_tfm
,
1970 (u8
*)&dtask
->digest
);
1971 debug_tcp("r2t data send fail 0x%x\n", dtask
->digest
);
1976 BUG_ON(r2t
->data_count
< 0);
1977 if (conn
->datadgst_en
)
1978 iscsi_buf_data_digest_update(conn
, &r2t
->sendbuf
);
1980 if (r2t
->data_count
) {
1981 BUG_ON(ctask
->sc
->use_sg
== 0);
1982 if (!iscsi_buf_left(&r2t
->sendbuf
)) {
1983 BUG_ON(ctask
->bad_sg
== r2t
->sg
);
1984 iscsi_buf_init_sg(&r2t
->sendbuf
, r2t
->sg
);
1992 * Done with this Data-Out. Next, check if we have
1993 * to send another Data-Out for this R2T.
1995 BUG_ON(r2t
->data_length
- r2t
->sent
< 0);
1996 left
= r2t
->data_length
- r2t
->sent
;
1998 if (conn
->datadgst_en
) {
1999 if (iscsi_digest_final_send(conn
, ctask
,
2001 &dtask
->digest
, 1)) {
2002 debug_tcp("send r2t data digest 0x%x"
2003 "fail\n", dtask
->digest
);
2006 debug_tcp("r2t data send digest 0x%x\n",
2009 iscsi_solicit_data_cont(conn
, ctask
, r2t
, left
);
2010 ctask
->xmstate
|= XMSTATE_SOL_DATA
;
2011 ctask
->xmstate
&= ~XMSTATE_SOL_HDR
;
2016 * Done with this R2T. Check if there are more
2017 * outstanding R2Ts ready to be processed.
2019 BUG_ON(ctask
->r2t_data_count
- r2t
->data_length
< 0);
2020 if (conn
->datadgst_en
) {
2021 if (iscsi_digest_final_send(conn
, ctask
, &dtask
->digestbuf
,
2022 &dtask
->digest
, 1)) {
2023 debug_tcp("send last r2t data digest 0x%x"
2024 "fail\n", dtask
->digest
);
2027 debug_tcp("r2t done dout digest 0x%x\n", dtask
->digest
);
2030 ctask
->r2t_data_count
-= r2t
->data_length
;
2032 spin_lock_bh(&session
->lock
);
2033 __kfifo_put(ctask
->r2tpool
.queue
, (void*)&r2t
, sizeof(void*));
2034 spin_unlock_bh(&session
->lock
);
2035 if (__kfifo_get(ctask
->r2tqueue
, (void*)&r2t
, sizeof(void*))) {
2037 ctask
->xmstate
|= XMSTATE_SOL_DATA
;
2038 ctask
->xmstate
&= ~XMSTATE_SOL_HDR
;
2046 handle_xmstate_w_pad(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
2048 struct iscsi_data_task
*dtask
= ctask
->dtask
;
2051 ctask
->xmstate
&= ~XMSTATE_W_PAD
;
2052 iscsi_buf_init_virt(&ctask
->sendbuf
, (char*)&ctask
->pad
,
2054 if (iscsi_sendpage(conn
, &ctask
->sendbuf
, &ctask
->pad_count
, &sent
)) {
2055 ctask
->xmstate
|= XMSTATE_W_PAD
;
2059 if (conn
->datadgst_en
) {
2060 iscsi_buf_data_digest_update(conn
, &ctask
->sendbuf
);
2063 if (iscsi_digest_final_send(conn
, ctask
, &ctask
->immbuf
,
2064 &ctask
->immdigest
, 1)) {
2065 debug_tcp("send padding digest 0x%x"
2066 "fail!\n", ctask
->immdigest
);
2069 debug_tcp("done with padding, digest 0x%x\n",
2072 if (iscsi_digest_final_send(conn
, ctask
,
2074 &dtask
->digest
, 1)) {
2075 debug_tcp("send padding digest 0x%x"
2076 "fail\n", dtask
->digest
);
2079 debug_tcp("done with padding, digest 0x%x\n",
2088 iscsi_ctask_xmit(struct iscsi_conn
*conn
, struct iscsi_cmd_task
*ctask
)
2092 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
2093 conn
->id
, ctask
->xmstate
, ctask
->itt
);
2096 * serialize with TMF AbortTask
2101 if (ctask
->xmstate
& XMSTATE_R_HDR
) {
2102 rc
= handle_xmstate_r_hdr(conn
, ctask
);
2106 if (ctask
->xmstate
& XMSTATE_W_HDR
) {
2107 rc
= handle_xmstate_w_hdr(conn
, ctask
);
2112 /* XXX: for data digest xmit recover */
2113 if (ctask
->xmstate
& XMSTATE_DATA_DIGEST
) {
2114 rc
= handle_xmstate_data_digest(conn
, ctask
);
2119 if (ctask
->xmstate
& XMSTATE_IMM_DATA
) {
2120 rc
= handle_xmstate_imm_data(conn
, ctask
);
2125 if (ctask
->xmstate
& XMSTATE_UNS_HDR
) {
2126 BUG_ON(!ctask
->unsol_count
);
2127 ctask
->xmstate
&= ~XMSTATE_UNS_HDR
;
2128 unsolicit_head_again
:
2129 rc
= handle_xmstate_uns_hdr(conn
, ctask
);
2134 if (ctask
->xmstate
& XMSTATE_UNS_DATA
) {
2135 rc
= handle_xmstate_uns_data(conn
, ctask
);
2137 goto unsolicit_head_again
;
2143 if (ctask
->xmstate
& XMSTATE_SOL_HDR
) {
2144 struct iscsi_r2t_info
*r2t
;
2146 ctask
->xmstate
&= ~XMSTATE_SOL_HDR
;
2147 ctask
->xmstate
|= XMSTATE_SOL_DATA
;
2149 __kfifo_get(ctask
->r2tqueue
, (void*)&ctask
->r2t
,
2153 if (conn
->hdrdgst_en
)
2154 iscsi_hdr_digest(conn
, &r2t
->headbuf
,
2155 (u8
*)r2t
->dtask
->hdrext
);
2156 if (iscsi_sendhdr(conn
, &r2t
->headbuf
, r2t
->data_count
)) {
2157 ctask
->xmstate
&= ~XMSTATE_SOL_DATA
;
2158 ctask
->xmstate
|= XMSTATE_SOL_HDR
;
2162 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
2163 r2t
->solicit_datasn
- 1, ctask
->itt
, r2t
->data_count
,
2167 if (ctask
->xmstate
& XMSTATE_SOL_DATA
) {
2168 rc
= handle_xmstate_sol_data(conn
, ctask
);
2170 goto solicit_head_again
;
2177 * Last thing to check is whether we need to send write
2178 * padding. Note that we check for xmstate equality, not just the bit.
2180 if (ctask
->xmstate
== XMSTATE_W_PAD
)
2181 rc
= handle_xmstate_w_pad(conn
, ctask
);
2187 * iscsi_data_xmit - xmit any command into the scheduled connection
2188 * @conn: iscsi connection
2191 * The function can return -EAGAIN in which case the caller must
2192 * re-schedule it again later or recover. '0' return code means
2196 iscsi_data_xmit(struct iscsi_conn
*conn
)
2198 if (unlikely(conn
->suspend_tx
)) {
2199 debug_tcp("conn %d Tx suspended!\n", conn
->id
);
2204 * Transmit in the following order:
2206 * 1) un-finished xmit (ctask or mtask)
2207 * 2) immediate control PDUs
2210 * 5) non-immediate control PDUs
2212 * No need to lock around __kfifo_get as long as
2213 * there's one producer and one consumer.
2216 BUG_ON(conn
->ctask
&& conn
->mtask
);
2219 if (iscsi_ctask_xmit(conn
, conn
->ctask
))
2221 /* done with this in-progress ctask */
2225 if (iscsi_mtask_xmit(conn
, conn
->mtask
))
2227 /* done with this in-progress mtask */
2231 /* process immediate first */
2232 if (unlikely(__kfifo_len(conn
->immqueue
))) {
2233 struct iscsi_session
*session
= conn
->session
;
2234 while (__kfifo_get(conn
->immqueue
, (void*)&conn
->mtask
,
2236 if (iscsi_mtask_xmit(conn
, conn
->mtask
))
2239 if (conn
->mtask
->hdr
.itt
==
2240 cpu_to_be32(ISCSI_RESERVED_TAG
)) {
2241 spin_lock_bh(&session
->lock
);
2242 __kfifo_put(session
->mgmtpool
.queue
,
2243 (void*)&conn
->mtask
, sizeof(void*));
2244 spin_unlock_bh(&session
->lock
);
2247 /* done with this mtask */
2251 /* process write queue */
2252 while (__kfifo_get(conn
->writequeue
, (void*)&conn
->ctask
,
2254 if (iscsi_ctask_xmit(conn
, conn
->ctask
))
2258 /* process command queue */
2259 while (__kfifo_get(conn
->xmitqueue
, (void*)&conn
->ctask
,
2261 if (iscsi_ctask_xmit(conn
, conn
->ctask
))
2264 /* done with this ctask */
2267 /* process the rest control plane PDUs, if any */
2268 if (unlikely(__kfifo_len(conn
->mgmtqueue
))) {
2269 struct iscsi_session
*session
= conn
->session
;
2271 while (__kfifo_get(conn
->mgmtqueue
, (void*)&conn
->mtask
,
2273 if (iscsi_mtask_xmit(conn
, conn
->mtask
))
2276 if (conn
->mtask
->hdr
.itt
==
2277 cpu_to_be32(ISCSI_RESERVED_TAG
)) {
2278 spin_lock_bh(&session
->lock
);
2279 __kfifo_put(session
->mgmtpool
.queue
,
2280 (void*)&conn
->mtask
,
2282 spin_unlock_bh(&session
->lock
);
2285 /* done with this mtask */
2292 if (unlikely(conn
->suspend_tx
))
2299 iscsi_xmitworker(void *data
)
2301 struct iscsi_conn
*conn
= data
;
2304 * serialize Xmit worker on a per-connection basis.
2306 down(&conn
->xmitsema
);
2307 if (iscsi_data_xmit(conn
))
2308 schedule_work(&conn
->xmitwork
);
2309 up(&conn
->xmitsema
);
/* reasons a command cannot be queued (see iscsi_queuecommand) */
#define FAILURE_BAD_HOST		1
#define FAILURE_SESSION_FAILED		2
#define FAILURE_SESSION_FREED		3
#define FAILURE_WINDOW_CLOSED		4
#define FAILURE_SESSION_TERMINATE	5
2319 iscsi_queuecommand(struct scsi_cmnd
*sc
, void (*done
)(struct scsi_cmnd
*))
2321 struct Scsi_Host
*host
;
2323 struct iscsi_session
*session
;
2324 struct iscsi_conn
*conn
= NULL
;
2325 struct iscsi_cmd_task
*ctask
= NULL
;
2327 sc
->scsi_done
= done
;
2330 host
= sc
->device
->host
;
2331 session
= iscsi_hostdata(host
->hostdata
);
2332 BUG_ON(host
!= session
->host
);
2334 spin_lock(&session
->lock
);
2336 if (session
->state
!= ISCSI_STATE_LOGGED_IN
) {
2337 if (session
->state
== ISCSI_STATE_FAILED
) {
2338 reason
= FAILURE_SESSION_FAILED
;
2340 } else if (session
->state
== ISCSI_STATE_TERMINATE
) {
2341 reason
= FAILURE_SESSION_TERMINATE
;
2344 reason
= FAILURE_SESSION_FREED
;
2349 * Check for iSCSI window and take care of CmdSN wrap-around
2351 if ((int)(session
->max_cmdsn
- session
->cmdsn
) < 0) {
2352 reason
= FAILURE_WINDOW_CLOSED
;
2356 conn
= session
->leadconn
;
2358 __kfifo_get(session
->cmdpool
.queue
, (void*)&ctask
, sizeof(void*));
2361 sc
->SCp
.phase
= session
->age
;
2362 sc
->SCp
.ptr
= (char*)ctask
;
2363 iscsi_cmd_init(conn
, ctask
, sc
);
2365 __kfifo_put(conn
->xmitqueue
, (void*)&ctask
, sizeof(void*));
2367 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
2368 sc
->sc_data_direction
== DMA_TO_DEVICE
? "write" : "read",
2369 conn
->id
, (long)sc
, ctask
->itt
, sc
->request_bufflen
,
2370 session
->cmdsn
, session
->max_cmdsn
- session
->exp_cmdsn
+ 1);
2371 spin_unlock(&session
->lock
);
2373 if (!in_interrupt() && !down_trylock(&conn
->xmitsema
)) {
2374 spin_unlock_irq(host
->host_lock
);
2375 if (iscsi_data_xmit(conn
))
2376 schedule_work(&conn
->xmitwork
);
2377 up(&conn
->xmitsema
);
2378 spin_lock_irq(host
->host_lock
);
2380 schedule_work(&conn
->xmitwork
);
2385 spin_unlock(&session
->lock
);
2386 debug_scsi("cmd 0x%x rejected (%d)\n", sc
->cmnd
[0], reason
);
2387 return SCSI_MLQUEUE_HOST_BUSY
;
2390 spin_unlock(&session
->lock
);
2391 printk(KERN_ERR
"iscsi_tcp: cmd 0x%x is not queued (%d)\n",
2392 sc
->cmnd
[0], reason
);
2393 sc
->sense_buffer
[0] = 0x70;
2394 sc
->sense_buffer
[2] = NOT_READY
;
2395 sc
->sense_buffer
[7] = 0x6;
2396 sc
->sense_buffer
[12] = 0x08;
2397 sc
->sense_buffer
[13] = 0x00;
2398 sc
->result
= (DID_NO_CONNECT
<< 16);
2399 sc
->resid
= sc
->request_bufflen
;
2405 iscsi_change_queue_depth(struct scsi_device
*sdev
, int depth
)
2407 if (depth
> ISCSI_MAX_CMD_PER_LUN
)
2408 depth
= ISCSI_MAX_CMD_PER_LUN
;
2409 scsi_adjust_queue_depth(sdev
, scsi_get_tag_type(sdev
), depth
);
2410 return sdev
->queue_depth
;
2414 iscsi_pool_init(struct iscsi_queue
*q
, int max
, void ***items
, int item_size
)
2418 *items
= kmalloc(max
* sizeof(void*), GFP_KERNEL
);
2423 q
->pool
= kmalloc(max
* sizeof(void*), GFP_KERNEL
);
2424 if (q
->pool
== NULL
) {
2429 q
->queue
= kfifo_init((void*)q
->pool
, max
* sizeof(void*),
2431 if (q
->queue
== ERR_PTR(-ENOMEM
)) {
2437 for (i
= 0; i
< max
; i
++) {
2438 q
->pool
[i
] = kmalloc(item_size
, GFP_KERNEL
);
2439 if (q
->pool
[i
] == NULL
) {
2442 for (j
= 0; j
< i
; j
++)
2445 kfifo_free(q
->queue
);
2450 memset(q
->pool
[i
], 0, item_size
);
2451 (*items
)[i
] = q
->pool
[i
];
2452 __kfifo_put(q
->queue
, (void*)&q
->pool
[i
], sizeof(void*));
2458 iscsi_pool_free(struct iscsi_queue
*q
, void **items
)
2462 for (i
= 0; i
< q
->max
; i
++)
2468 static iscsi_connh_t
2469 iscsi_conn_create(iscsi_sessionh_t sessionh
, uint32_t conn_idx
)
2471 struct iscsi_session
*session
= iscsi_ptr(sessionh
);
2472 struct iscsi_conn
*conn
= NULL
;
2474 conn
= kmalloc(sizeof(struct iscsi_conn
), GFP_KERNEL
);
2476 goto conn_alloc_fail
;
2477 memset(conn
, 0, sizeof(struct iscsi_conn
));
2479 conn
->c_stage
= ISCSI_CONN_INITIAL_STAGE
;
2480 conn
->in_progress
= IN_PROGRESS_WAIT_HEADER
;
2481 conn
->id
= conn_idx
;
2482 conn
->exp_statsn
= 0;
2483 conn
->tmabort_state
= TMABORT_INITIAL
;
2485 /* initial operational parameters */
2486 conn
->hdr_size
= sizeof(struct iscsi_hdr
);
2487 conn
->data_size
= DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH
;
2488 conn
->max_recv_dlength
= DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH
;
2490 spin_lock_init(&conn
->lock
);
2492 /* initialize general xmit PDU commands queue */
2493 conn
->xmitqueue
= kfifo_alloc(session
->cmds_max
* sizeof(void*),
2495 if (conn
->xmitqueue
== ERR_PTR(-ENOMEM
))
2496 goto xmitqueue_alloc_fail
;
2498 /* initialize write response PDU commands queue */
2499 conn
->writequeue
= kfifo_alloc(session
->cmds_max
* sizeof(void*),
2501 if (conn
->writequeue
== ERR_PTR(-ENOMEM
))
2502 goto writequeue_alloc_fail
;
2504 /* initialize general immediate & non-immediate PDU commands queue */
2505 conn
->immqueue
= kfifo_alloc(session
->mgmtpool_max
* sizeof(void*),
2507 if (conn
->immqueue
== ERR_PTR(-ENOMEM
))
2508 goto immqueue_alloc_fail
;
2510 conn
->mgmtqueue
= kfifo_alloc(session
->mgmtpool_max
* sizeof(void*),
2512 if (conn
->mgmtqueue
== ERR_PTR(-ENOMEM
))
2513 goto mgmtqueue_alloc_fail
;
2515 INIT_WORK(&conn
->xmitwork
, iscsi_xmitworker
, conn
);
2517 /* allocate login_mtask used for the login/text sequences */
2518 spin_lock_bh(&session
->lock
);
2519 if (!__kfifo_get(session
->mgmtpool
.queue
,
2520 (void*)&conn
->login_mtask
,
2522 spin_unlock_bh(&session
->lock
);
2523 goto login_mtask_alloc_fail
;
2525 spin_unlock_bh(&session
->lock
);
2527 /* allocate initial PDU receive place holder */
2528 if (conn
->data_size
<= PAGE_SIZE
)
2529 conn
->data
= kmalloc(conn
->data_size
, GFP_KERNEL
);
2531 conn
->data
= (void*)__get_free_pages(GFP_KERNEL
,
2532 get_order(conn
->data_size
));
2534 goto max_recv_dlenght_alloc_fail
;
2536 init_timer(&conn
->tmabort_timer
);
2537 init_MUTEX(&conn
->xmitsema
);
2538 init_waitqueue_head(&conn
->ehwait
);
2540 return iscsi_handle(conn
);
2542 max_recv_dlenght_alloc_fail
:
2543 spin_lock_bh(&session
->lock
);
2544 __kfifo_put(session
->mgmtpool
.queue
, (void*)&conn
->login_mtask
,
2546 spin_unlock_bh(&session
->lock
);
2547 login_mtask_alloc_fail
:
2548 kfifo_free(conn
->mgmtqueue
);
2549 mgmtqueue_alloc_fail
:
2550 kfifo_free(conn
->immqueue
);
2551 immqueue_alloc_fail
:
2552 kfifo_free(conn
->writequeue
);
2553 writequeue_alloc_fail
:
2554 kfifo_free(conn
->xmitqueue
);
2555 xmitqueue_alloc_fail
:
2558 return iscsi_handle(NULL
);
2562 iscsi_conn_destroy(iscsi_connh_t connh
)
2564 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
2565 struct iscsi_session
*session
= conn
->session
;
2567 down(&conn
->xmitsema
);
2568 set_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
2569 if (conn
->c_stage
== ISCSI_CONN_INITIAL_STAGE
&& conn
->sock
) {
2570 struct sock
*sk
= conn
->sock
->sk
;
2573 * conn_start() has never been called!
2574 * need to cleanup the socket.
2576 write_lock_bh(&sk
->sk_callback_lock
);
2577 set_bit(SUSPEND_BIT
, &conn
->suspend_rx
);
2578 write_unlock_bh(&sk
->sk_callback_lock
);
2580 sock_hold(conn
->sock
->sk
);
2581 iscsi_conn_restore_callbacks(conn
);
2582 sock_put(conn
->sock
->sk
);
2583 sock_release(conn
->sock
);
2587 spin_lock_bh(&session
->lock
);
2588 conn
->c_stage
= ISCSI_CONN_CLEANUP_WAIT
;
2589 if (session
->leadconn
== conn
) {
2591 * leading connection? then give up on recovery.
2593 session
->state
= ISCSI_STATE_TERMINATE
;
2594 wake_up(&conn
->ehwait
);
2596 spin_unlock_bh(&session
->lock
);
2598 up(&conn
->xmitsema
);
2601 * Block until all in-progress commands for this connection
2605 spin_lock_bh(&conn
->lock
);
2606 if (!session
->host
->host_busy
) { /* OK for ERL == 0 */
2607 spin_unlock_bh(&conn
->lock
);
2610 spin_unlock_bh(&conn
->lock
);
2611 msleep_interruptible(500);
2612 printk("conn_destroy(): host_busy %d host_failed %d\n",
2613 session
->host
->host_busy
, session
->host
->host_failed
);
2615 * force eh_abort() to unblock
2617 wake_up(&conn
->ehwait
);
2620 /* now free crypto */
2621 if (conn
->hdrdgst_en
|| conn
->datadgst_en
) {
2623 crypto_free_tfm(conn
->tx_tfm
);
2625 crypto_free_tfm(conn
->rx_tfm
);
2626 if (conn
->data_tx_tfm
)
2627 crypto_free_tfm(conn
->data_tx_tfm
);
2628 if (conn
->data_rx_tfm
)
2629 crypto_free_tfm(conn
->data_rx_tfm
);
2632 /* free conn->data, size = MaxRecvDataSegmentLength */
2633 if (conn
->data_size
<= PAGE_SIZE
)
2636 free_pages((unsigned long)conn
->data
,
2637 get_order(conn
->data_size
));
2639 spin_lock_bh(&session
->lock
);
2640 __kfifo_put(session
->mgmtpool
.queue
, (void*)&conn
->login_mtask
,
2642 list_del(&conn
->item
);
2643 if (list_empty(&session
->connections
))
2644 session
->leadconn
= NULL
;
2645 if (session
->leadconn
&& session
->leadconn
== conn
)
2646 session
->leadconn
= container_of(session
->connections
.next
,
2647 struct iscsi_conn
, item
);
2649 if (session
->leadconn
== NULL
)
2650 /* none connections exits.. reset sequencing */
2651 session
->cmdsn
= session
->max_cmdsn
= session
->exp_cmdsn
= 1;
2652 spin_unlock_bh(&session
->lock
);
2654 kfifo_free(conn
->xmitqueue
);
2655 kfifo_free(conn
->writequeue
);
2656 kfifo_free(conn
->immqueue
);
2657 kfifo_free(conn
->mgmtqueue
);
2662 iscsi_conn_bind(iscsi_sessionh_t sessionh
, iscsi_connh_t connh
,
2663 uint32_t transport_fd
, int is_leading
)
2665 struct iscsi_session
*session
= iscsi_ptr(sessionh
);
2666 struct iscsi_conn
*tmp
= ERR_PTR(-EEXIST
), *conn
= iscsi_ptr(connh
);
2668 struct socket
*sock
;
2671 /* lookup for existing socket */
2672 sock
= sockfd_lookup(transport_fd
, &err
);
2674 printk(KERN_ERR
"iscsi_tcp: sockfd_lookup failed %d\n", err
);
2678 /* lookup for existing connection */
2679 spin_lock_bh(&session
->lock
);
2680 list_for_each_entry(tmp
, &session
->connections
, item
) {
2682 if (conn
->c_stage
!= ISCSI_CONN_STOPPED
||
2683 conn
->stop_stage
== STOP_CONN_TERM
) {
2684 printk(KERN_ERR
"iscsi_tcp: can't bind "
2685 "non-stopped connection (%d:%d)\n",
2686 conn
->c_stage
, conn
->stop_stage
);
2687 spin_unlock_bh(&session
->lock
);
2694 /* bind new iSCSI connection to session */
2695 conn
->session
= session
;
2697 list_add(&conn
->item
, &session
->connections
);
2699 spin_unlock_bh(&session
->lock
);
2701 if (conn
->stop_stage
!= STOP_CONN_SUSPEND
) {
2702 /* bind iSCSI connection and socket */
2705 /* setup Socket parameters */
2708 sk
->sk_sndtimeo
= 15 * HZ
; /* FIXME: make it configurable */
2709 sk
->sk_allocation
= GFP_ATOMIC
;
2711 /* FIXME: disable Nagle's algorithm */
2714 * Intercept TCP callbacks for sendfile like receive
2717 iscsi_conn_set_callbacks(conn
);
2720 * set receive state machine into initial state
2722 conn
->in_progress
= IN_PROGRESS_WAIT_HEADER
;
2726 session
->leadconn
= conn
;
2729 * Unblock xmitworker(), Login Phase will pass through.
2731 clear_bit(SUSPEND_BIT
, &conn
->suspend_rx
);
2732 clear_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
2738 iscsi_conn_start(iscsi_connh_t connh
)
2740 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
2741 struct iscsi_session
*session
= conn
->session
;
2744 /* FF phase warming up... */
2746 if (session
== NULL
) {
2747 printk(KERN_ERR
"iscsi_tcp: can't start unbound connection\n");
2751 sk
= conn
->sock
->sk
;
2753 write_lock_bh(&sk
->sk_callback_lock
);
2754 spin_lock_bh(&session
->lock
);
2755 conn
->c_stage
= ISCSI_CONN_STARTED
;
2756 session
->state
= ISCSI_STATE_LOGGED_IN
;
2758 switch(conn
->stop_stage
) {
2759 case STOP_CONN_RECOVER
:
2761 * unblock eh_abort() if it is blocked. re-try all
2762 * commands after successful recovery
2764 session
->conn_cnt
++;
2765 conn
->stop_stage
= 0;
2766 conn
->tmabort_state
= TMABORT_INITIAL
;
2768 wake_up(&conn
->ehwait
);
2770 case STOP_CONN_TERM
:
2771 session
->conn_cnt
++;
2772 conn
->stop_stage
= 0;
2774 case STOP_CONN_SUSPEND
:
2775 conn
->stop_stage
= 0;
2776 clear_bit(SUSPEND_BIT
, &conn
->suspend_rx
);
2777 clear_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
2782 spin_unlock_bh(&session
->lock
);
2783 write_unlock_bh(&sk
->sk_callback_lock
);
2789 iscsi_conn_stop(iscsi_connh_t connh
, int flag
)
2791 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
2792 struct iscsi_session
*session
= conn
->session
;
2794 unsigned long flags
;
2796 BUG_ON(!conn
->sock
);
2797 sk
= conn
->sock
->sk
;
2798 write_lock_bh(&sk
->sk_callback_lock
);
2799 set_bit(SUSPEND_BIT
, &conn
->suspend_rx
);
2800 write_unlock_bh(&sk
->sk_callback_lock
);
2802 down(&conn
->xmitsema
);
2804 spin_lock_irqsave(session
->host
->host_lock
, flags
);
2805 spin_lock(&session
->lock
);
2806 conn
->stop_stage
= flag
;
2807 conn
->c_stage
= ISCSI_CONN_STOPPED
;
2808 set_bit(SUSPEND_BIT
, &conn
->suspend_tx
);
2810 if (flag
!= STOP_CONN_SUSPEND
)
2811 session
->conn_cnt
--;
2813 if (session
->conn_cnt
== 0 || session
->leadconn
== conn
)
2814 session
->state
= ISCSI_STATE_FAILED
;
2816 spin_unlock(&session
->lock
);
2817 spin_unlock_irqrestore(session
->host
->host_lock
, flags
);
2819 if (flag
== STOP_CONN_TERM
|| flag
== STOP_CONN_RECOVER
) {
2820 struct iscsi_cmd_task
*ctask
;
2821 struct iscsi_mgmt_task
*mtask
;
2824 * Socket must go now.
2826 sock_hold(conn
->sock
->sk
);
2827 iscsi_conn_restore_callbacks(conn
);
2828 sock_put(conn
->sock
->sk
);
2831 * flush xmit queues.
2833 spin_lock_bh(&session
->lock
);
2834 while (__kfifo_get(conn
->writequeue
, (void*)&ctask
,
2836 __kfifo_get(conn
->xmitqueue
, (void*)&ctask
,
2838 struct iscsi_r2t_info
*r2t
;
2841 * flush ctask's r2t queues
2843 while (__kfifo_get(ctask
->r2tqueue
, (void*)&r2t
,
2845 __kfifo_put(ctask
->r2tpool
.queue
, (void*)&r2t
,
2848 spin_unlock_bh(&session
->lock
);
2850 iscsi_ctask_cleanup(conn
, ctask
);
2852 spin_lock_bh(&session
->lock
);
2855 while (__kfifo_get(conn
->immqueue
, (void*)&mtask
,
2857 __kfifo_get(conn
->mgmtqueue
, (void*)&mtask
,
2859 __kfifo_put(session
->mgmtpool
.queue
,
2860 (void*)&mtask
, sizeof(void*));
2863 spin_unlock_bh(&session
->lock
);
2866 * release socket only after we stopped data_xmit()
2867 * activity and flushed all outstandings
2869 sock_release(conn
->sock
);
2873 * for connection level recovery we should not calculate
2874 * header digest. conn->hdr_size used for optimization
2875 * in hdr_extract() and will be re-negotiated at
2878 if (flag
== STOP_CONN_RECOVER
)
2879 conn
->hdr_size
= sizeof(struct iscsi_hdr
);
2881 up(&conn
->xmitsema
);
2885 iscsi_conn_send_generic(struct iscsi_conn
*conn
, struct iscsi_hdr
*hdr
,
2886 char *data
, uint32_t data_size
)
2888 struct iscsi_session
*session
= conn
->session
;
2889 struct iscsi_nopout
*nop
= (struct iscsi_nopout
*)hdr
;
2890 struct iscsi_mgmt_task
*mtask
;
2892 spin_lock_bh(&session
->lock
);
2893 if (session
->state
== ISCSI_STATE_TERMINATE
) {
2894 spin_unlock_bh(&session
->lock
);
2897 if (hdr
->opcode
== (ISCSI_OP_LOGIN
| ISCSI_OP_IMMEDIATE
) ||
2898 hdr
->opcode
== (ISCSI_OP_TEXT
| ISCSI_OP_IMMEDIATE
))
2900 * Login and Text are sent serially, in
2901 * request-followed-by-response sequence.
2902 * Same mtask can be used. Same ITT must be used.
2903 * Note that login_mtask is preallocated at conn_create().
2905 mtask
= conn
->login_mtask
;
2907 BUG_ON(conn
->c_stage
== ISCSI_CONN_INITIAL_STAGE
);
2908 BUG_ON(conn
->c_stage
== ISCSI_CONN_STOPPED
);
2910 if (!__kfifo_get(session
->mgmtpool
.queue
,
2911 (void*)&mtask
, sizeof(void*))) {
2912 spin_unlock_bh(&session
->lock
);
2918 * pre-format CmdSN and ExpStatSN for outgoing PDU.
2920 if (hdr
->itt
!= cpu_to_be32(ISCSI_RESERVED_TAG
)) {
2921 hdr
->itt
= mtask
->itt
| (conn
->id
<< CID_SHIFT
) |
2922 (session
->age
<< AGE_SHIFT
);
2923 nop
->cmdsn
= cpu_to_be32(session
->cmdsn
);
2924 if (conn
->c_stage
== ISCSI_CONN_STARTED
&&
2925 !(hdr
->opcode
& ISCSI_OP_IMMEDIATE
))
2928 /* do not advance CmdSN */
2929 nop
->cmdsn
= cpu_to_be32(session
->cmdsn
);
2931 nop
->exp_statsn
= cpu_to_be32(conn
->exp_statsn
);
2933 memcpy(&mtask
->hdr
, hdr
, sizeof(struct iscsi_hdr
));
2935 iscsi_buf_init_virt(&mtask
->headbuf
, (char*)&mtask
->hdr
,
2936 sizeof(struct iscsi_hdr
));
2938 spin_unlock_bh(&session
->lock
);
2941 memcpy(mtask
->data
, data
, data_size
);
2942 mtask
->data_count
= data_size
;
2944 mtask
->data_count
= 0;
2946 mtask
->xmstate
= XMSTATE_IMM_HDR
;
2948 if (mtask
->data_count
) {
2949 iscsi_buf_init_iov(&mtask
->sendbuf
, (char*)mtask
->data
,
2953 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
2954 hdr
->opcode
, hdr
->itt
, data_size
);
2957 * since send_pdu() could be called at least from two contexts,
2958 * we need to serialize __kfifo_put, so we don't have to take
2959 * additional lock on fast data-path
2961 if (hdr
->opcode
& ISCSI_OP_IMMEDIATE
)
2962 __kfifo_put(conn
->immqueue
, (void*)&mtask
, sizeof(void*));
2964 __kfifo_put(conn
->mgmtqueue
, (void*)&mtask
, sizeof(void*));
2966 schedule_work(&conn
->xmitwork
);
2972 iscsi_eh_host_reset(struct scsi_cmnd
*sc
)
2974 struct iscsi_cmd_task
*ctask
= (struct iscsi_cmd_task
*)sc
->SCp
.ptr
;
2975 struct iscsi_conn
*conn
= ctask
->conn
;
2976 struct iscsi_session
*session
= conn
->session
;
2978 spin_lock_bh(&session
->lock
);
2979 if (session
->state
== ISCSI_STATE_TERMINATE
) {
2980 debug_scsi("failing host reset: session terminated "
2981 "[CID %d age %d]", conn
->id
, session
->age
);
2982 spin_unlock_bh(&session
->lock
);
2985 spin_unlock_bh(&session
->lock
);
2987 debug_scsi("failing connection CID %d due to SCSI host reset "
2988 "[itt 0x%x age %d]", conn
->id
, ctask
->itt
,
2990 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
2996 iscsi_tmabort_timedout(unsigned long data
)
2998 struct iscsi_cmd_task
*ctask
= (struct iscsi_cmd_task
*)data
;
2999 struct iscsi_conn
*conn
= ctask
->conn
;
3000 struct iscsi_session
*session
= conn
->session
;
3002 spin_lock(&session
->lock
);
3003 if (conn
->tmabort_state
== TMABORT_INITIAL
) {
3004 __kfifo_put(session
->mgmtpool
.queue
,
3005 (void*)&ctask
->mtask
, sizeof(void*));
3006 conn
->tmabort_state
= TMABORT_TIMEDOUT
;
3007 debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
3008 (long)ctask
->sc
, ctask
->itt
);
3009 /* unblock eh_abort() */
3010 wake_up(&conn
->ehwait
);
3012 spin_unlock(&session
->lock
);
3016 iscsi_eh_abort(struct scsi_cmnd
*sc
)
3019 struct iscsi_cmd_task
*ctask
= (struct iscsi_cmd_task
*)sc
->SCp
.ptr
;
3020 struct iscsi_conn
*conn
= ctask
->conn
;
3021 struct iscsi_session
*session
= conn
->session
;
3023 conn
->eh_abort_cnt
++;
3024 debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc
, ctask
->itt
);
3027 * two cases for ERL=0 here:
3029 * 1) connection-level failure;
3030 * 2) recovery due protocol error;
3032 down(&conn
->xmitsema
);
3033 spin_lock_bh(&session
->lock
);
3034 if (session
->state
!= ISCSI_STATE_LOGGED_IN
) {
3035 if (session
->state
== ISCSI_STATE_TERMINATE
) {
3036 spin_unlock_bh(&session
->lock
);
3037 up(&conn
->xmitsema
);
3040 spin_unlock_bh(&session
->lock
);
3042 struct iscsi_tm
*hdr
= &conn
->tmhdr
;
3045 * Still LOGGED_IN...
3048 if (!ctask
->sc
|| sc
->SCp
.phase
!= session
->age
) {
3050 * 1) ctask completed before time out. But session
3051 * is still ok => Happy Retry.
3052 * 2) session was re-open during time out of ctask.
3054 spin_unlock_bh(&session
->lock
);
3055 up(&conn
->xmitsema
);
3058 conn
->tmabort_state
= TMABORT_INITIAL
;
3059 spin_unlock_bh(&session
->lock
);
3062 * ctask timed out but session is OK
3063 * ERL=0 requires task mgmt abort to be issued on each
3064 * failed command. requests must be serialized.
3066 memset(hdr
, 0, sizeof(struct iscsi_tm
));
3067 hdr
->opcode
= ISCSI_OP_SCSI_TMFUNC
| ISCSI_OP_IMMEDIATE
;
3068 hdr
->flags
= ISCSI_TM_FUNC_ABORT_TASK
;
3069 hdr
->flags
|= ISCSI_FLAG_CMD_FINAL
;
3070 memcpy(hdr
->lun
, ctask
->hdr
.lun
, sizeof(hdr
->lun
));
3071 hdr
->rtt
= ctask
->hdr
.itt
;
3072 hdr
->refcmdsn
= ctask
->hdr
.cmdsn
;
3074 rc
= iscsi_conn_send_generic(conn
, (struct iscsi_hdr
*)hdr
,
3077 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
3078 debug_scsi("abort sent failure [itt 0x%x]", ctask
->itt
);
3080 struct iscsi_r2t_info
*r2t
;
3083 * TMF abort vs. TMF response race logic
3085 spin_lock_bh(&session
->lock
);
3086 ctask
->mtask
= (struct iscsi_mgmt_task
*)
3087 session
->mgmt_cmds
[(hdr
->itt
& ITT_MASK
) -
3088 ISCSI_MGMT_ITT_OFFSET
];
3090 * have to flush r2tqueue to avoid r2t leaks
3092 while (__kfifo_get(ctask
->r2tqueue
, (void*)&r2t
,
3094 __kfifo_put(ctask
->r2tpool
.queue
, (void*)&r2t
,
3097 if (conn
->tmabort_state
== TMABORT_INITIAL
) {
3098 conn
->tmfcmd_pdus_cnt
++;
3099 conn
->tmabort_timer
.expires
= 3*HZ
+ jiffies
;
3100 conn
->tmabort_timer
.function
=
3101 iscsi_tmabort_timedout
;
3102 conn
->tmabort_timer
.data
= (unsigned long)ctask
;
3103 add_timer(&conn
->tmabort_timer
);
3104 debug_scsi("abort sent [itt 0x%x]", ctask
->itt
);
3107 conn
->tmabort_state
== TMABORT_SUCCESS
) {
3108 conn
->tmabort_state
= TMABORT_INITIAL
;
3109 spin_unlock_bh(&session
->lock
);
3110 up(&conn
->xmitsema
);
3113 conn
->tmabort_state
= TMABORT_INITIAL
;
3114 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
3116 spin_unlock_bh(&session
->lock
);
3119 up(&conn
->xmitsema
);
3123 * block eh thread until:
3125 * 1) abort response;
3127 * 3) session re-opened;
3128 * 4) session terminated;
3131 int p_state
= session
->state
;
3133 rc
= wait_event_interruptible(conn
->ehwait
,
3134 (p_state
== ISCSI_STATE_LOGGED_IN
?
3135 (session
->state
== ISCSI_STATE_TERMINATE
||
3136 conn
->tmabort_state
!= TMABORT_INITIAL
) :
3137 (session
->state
== ISCSI_STATE_TERMINATE
||
3138 session
->state
== ISCSI_STATE_LOGGED_IN
)));
3141 session
->state
= ISCSI_STATE_TERMINATE
;
3145 if (signal_pending(current
))
3146 flush_signals(current
);
3148 if (session
->state
== ISCSI_STATE_TERMINATE
)
3151 spin_lock_bh(&session
->lock
);
3152 if (sc
->SCp
.phase
== session
->age
&&
3153 (conn
->tmabort_state
== TMABORT_TIMEDOUT
||
3154 conn
->tmabort_state
== TMABORT_FAILED
)) {
3155 conn
->tmabort_state
= TMABORT_INITIAL
;
3158 * ctask completed before tmf abort response or
3160 * But session is still ok => Happy Retry.
3162 spin_unlock_bh(&session
->lock
);
3165 spin_unlock_bh(&session
->lock
);
3166 iscsi_conn_failure(conn
, ISCSI_ERR_CONN_FAILED
);
3169 spin_unlock_bh(&session
->lock
);
3174 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc
, ctask
->itt
);
3179 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc
, ctask
->itt
);
3183 del_timer_sync(&conn
->tmabort_timer
);
3185 down(&conn
->xmitsema
);
3187 struct sock
*sk
= conn
->sock
->sk
;
3189 write_lock_bh(&sk
->sk_callback_lock
);
3190 iscsi_ctask_cleanup(conn
, ctask
);
3191 write_unlock_bh(&sk
->sk_callback_lock
);
3193 up(&conn
->xmitsema
);
3198 iscsi_r2tpool_alloc(struct iscsi_session
*session
)
3204 * initialize per-task: R2T pool and xmit queue
3206 for (cmd_i
= 0; cmd_i
< session
->cmds_max
; cmd_i
++) {
3207 struct iscsi_cmd_task
*ctask
= session
->cmds
[cmd_i
];
3210 * pre-allocated x4 as much r2ts to handle race when
3211 * target acks DataOut faster than we data_xmit() queues
3212 * could replenish r2tqueue.
3216 if (iscsi_pool_init(&ctask
->r2tpool
, session
->max_r2t
* 4,
3217 (void***)&ctask
->r2ts
, sizeof(struct iscsi_r2t_info
))) {
3218 goto r2t_alloc_fail
;
3221 /* R2T xmit queue */
3222 ctask
->r2tqueue
= kfifo_alloc(
3223 session
->max_r2t
* 4 * sizeof(void*), GFP_KERNEL
, NULL
);
3224 if (ctask
->r2tqueue
== ERR_PTR(-ENOMEM
)) {
3225 iscsi_pool_free(&ctask
->r2tpool
, (void**)ctask
->r2ts
);
3226 goto r2t_alloc_fail
;
3231 * Data-Out PDU's within R2T-sequence can be quite big;
3234 ctask
->datapool
= mempool_create(ISCSI_DTASK_DEFAULT_MAX
,
3235 mempool_alloc_slab
, mempool_free_slab
, taskcache
);
3236 if (ctask
->datapool
== NULL
) {
3237 kfifo_free(ctask
->r2tqueue
);
3238 iscsi_pool_free(&ctask
->r2tpool
, (void**)ctask
->r2ts
);
3239 goto r2t_alloc_fail
;
3241 INIT_LIST_HEAD(&ctask
->dataqueue
);
3247 for (i
= 0; i
< cmd_i
; i
++) {
3248 mempool_destroy(session
->cmds
[i
]->datapool
);
3249 kfifo_free(session
->cmds
[i
]->r2tqueue
);
3250 iscsi_pool_free(&session
->cmds
[i
]->r2tpool
,
3251 (void**)session
->cmds
[i
]->r2ts
);
3257 iscsi_r2tpool_free(struct iscsi_session
*session
)
3261 for (i
= 0; i
< session
->cmds_max
; i
++) {
3262 mempool_destroy(session
->cmds
[i
]->datapool
);
3263 kfifo_free(session
->cmds
[i
]->r2tqueue
);
3264 iscsi_pool_free(&session
->cmds
[i
]->r2tpool
,
3265 (void**)session
->cmds
[i
]->r2ts
);
/*
 * SCSI midlayer host template for the iSCSI/TCP initiator.
 * NOTE(review): this span is a mangled extraction — member lines between
 * .name and .queuecommand (and the closing brace) are missing from view.
 */
3269 static struct scsi_host_template iscsi_sht
= {
3270 .name
= "iSCSI Initiator over TCP/IP, v."
3272 .queuecommand
= iscsi_queuecommand
,
3273 .change_queue_depth
= iscsi_change_queue_depth
,
/* one slot reserved, presumably for internal use — confirm */
3274 .can_queue
= ISCSI_XMIT_CMDS_MAX
- 1,
3275 .sg_tablesize
= ISCSI_SG_TABLESIZE
,
3276 .cmd_per_lun
= ISCSI_DEF_CMD_PER_LUN
,
/* error-handling entry points defined earlier in this file */
3277 .eh_abort_handler
= iscsi_eh_abort
,
3278 .eh_host_reset_handler
= iscsi_eh_host_reset
,
3279 .use_clustering
= DISABLE_CLUSTERING
,
3280 .proc_name
= "iscsi_tcp",
3284 static iscsi_sessionh_t
3285 iscsi_session_create(uint32_t initial_cmdsn
, struct Scsi_Host
*host
)
3288 struct iscsi_session
*session
;
3290 session
= iscsi_hostdata(host
->hostdata
);
3291 memset(session
, 0, sizeof(struct iscsi_session
));
3293 session
->host
= host
;
3294 session
->id
= host
->host_no
;
3295 session
->state
= ISCSI_STATE_LOGGED_IN
;
3296 session
->mgmtpool_max
= ISCSI_MGMT_CMDS_MAX
;
3297 session
->cmds_max
= ISCSI_XMIT_CMDS_MAX
;
3298 session
->cmdsn
= initial_cmdsn
;
3299 session
->exp_cmdsn
= initial_cmdsn
+ 1;
3300 session
->max_cmdsn
= initial_cmdsn
+ 1;
3301 session
->max_r2t
= 1;
3303 /* initialize SCSI PDU commands pool */
3304 if (iscsi_pool_init(&session
->cmdpool
, session
->cmds_max
,
3305 (void***)&session
->cmds
, sizeof(struct iscsi_cmd_task
)))
3306 goto cmdpool_alloc_fail
;
3308 /* pre-format cmds pool with ITT */
3309 for (cmd_i
= 0; cmd_i
< session
->cmds_max
; cmd_i
++)
3310 session
->cmds
[cmd_i
]->itt
= cmd_i
;
3312 spin_lock_init(&session
->lock
);
3313 INIT_LIST_HEAD(&session
->connections
);
3315 /* initialize immediate command pool */
3316 if (iscsi_pool_init(&session
->mgmtpool
, session
->mgmtpool_max
,
3317 (void***)&session
->mgmt_cmds
, sizeof(struct iscsi_mgmt_task
)))
3318 goto mgmtpool_alloc_fail
;
3321 /* pre-format immediate cmds pool with ITT */
3322 for (cmd_i
= 0; cmd_i
< session
->mgmtpool_max
; cmd_i
++) {
3323 session
->mgmt_cmds
[cmd_i
]->itt
= ISCSI_MGMT_ITT_OFFSET
+ cmd_i
;
3324 session
->mgmt_cmds
[cmd_i
]->data
= kmalloc(
3325 DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH
, GFP_KERNEL
);
3326 if (!session
->mgmt_cmds
[cmd_i
]->data
) {
3329 for (j
= 0; j
< cmd_i
; j
++)
3330 kfree(session
->mgmt_cmds
[j
]->data
);
3331 goto immdata_alloc_fail
;
3335 if (iscsi_r2tpool_alloc(session
))
3336 goto r2tpool_alloc_fail
;
3338 return iscsi_handle(session
);
3341 for (cmd_i
= 0; cmd_i
< session
->mgmtpool_max
; cmd_i
++)
3342 kfree(session
->mgmt_cmds
[cmd_i
]->data
);
3343 iscsi_pool_free(&session
->mgmtpool
, (void**)session
->mgmt_cmds
);
3345 mgmtpool_alloc_fail
:
3346 iscsi_pool_free(&session
->cmdpool
, (void**)session
->cmds
);
3348 return iscsi_handle(NULL
);
3352 iscsi_session_destroy(iscsi_sessionh_t sessionh
)
3355 struct iscsi_data_task
*dtask
, *n
;
3356 struct iscsi_session
*session
= iscsi_ptr(sessionh
);
3358 for (cmd_i
= 0; cmd_i
< session
->cmds_max
; cmd_i
++) {
3359 struct iscsi_cmd_task
*ctask
= session
->cmds
[cmd_i
];
3360 list_for_each_entry_safe(dtask
, n
, &ctask
->dataqueue
, item
) {
3361 list_del(&dtask
->item
);
3362 mempool_free(dtask
, ctask
->datapool
);
3366 for (cmd_i
= 0; cmd_i
< session
->mgmtpool_max
; cmd_i
++)
3367 kfree(session
->mgmt_cmds
[cmd_i
]->data
);
3369 iscsi_r2tpool_free(session
);
3370 iscsi_pool_free(&session
->mgmtpool
, (void**)session
->mgmt_cmds
);
3371 iscsi_pool_free(&session
->cmdpool
, (void**)session
->cmds
);
3375 iscsi_conn_set_param(iscsi_connh_t connh
, enum iscsi_param param
,
3378 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
3379 struct iscsi_session
*session
= conn
->session
;
3381 spin_lock_bh(&session
->lock
);
3382 if (conn
->c_stage
!= ISCSI_CONN_INITIAL_STAGE
&&
3383 conn
->stop_stage
!= STOP_CONN_RECOVER
) {
3384 printk(KERN_ERR
"iscsi_tcp: can not change parameter [%d]\n",
3386 spin_unlock_bh(&session
->lock
);
3389 spin_unlock_bh(&session
->lock
);
3392 case ISCSI_PARAM_MAX_RECV_DLENGTH
: {
3393 char *saveptr
= conn
->data
;
3394 int flags
= GFP_KERNEL
;
3396 if (conn
->data_size
>= value
) {
3397 conn
->max_recv_dlength
= value
;
3401 spin_lock_bh(&session
->lock
);
3402 if (conn
->stop_stage
== STOP_CONN_RECOVER
)
3404 spin_unlock_bh(&session
->lock
);
3406 if (value
<= PAGE_SIZE
)
3407 conn
->data
= kmalloc(value
, flags
);
3409 conn
->data
= (void*)__get_free_pages(flags
,
3411 if (conn
->data
== NULL
) {
3412 conn
->data
= saveptr
;
3415 if (conn
->data_size
<= PAGE_SIZE
)
3418 free_pages((unsigned long)saveptr
,
3419 get_order(conn
->data_size
));
3420 conn
->max_recv_dlength
= value
;
3421 conn
->data_size
= value
;
3424 case ISCSI_PARAM_MAX_XMIT_DLENGTH
:
3425 conn
->max_xmit_dlength
= value
;
3427 case ISCSI_PARAM_HDRDGST_EN
:
3428 conn
->hdrdgst_en
= value
;
3429 conn
->hdr_size
= sizeof(struct iscsi_hdr
);
3430 if (conn
->hdrdgst_en
) {
3431 conn
->hdr_size
+= sizeof(__u32
);
3433 conn
->tx_tfm
= crypto_alloc_tfm("crc32c", 0);
3437 conn
->rx_tfm
= crypto_alloc_tfm("crc32c", 0);
3438 if (!conn
->rx_tfm
) {
3439 crypto_free_tfm(conn
->tx_tfm
);
3444 crypto_free_tfm(conn
->tx_tfm
);
3446 crypto_free_tfm(conn
->rx_tfm
);
3449 case ISCSI_PARAM_DATADGST_EN
:
3450 conn
->datadgst_en
= value
;
3451 if (conn
->datadgst_en
) {
3452 if (!conn
->data_tx_tfm
)
3454 crypto_alloc_tfm("crc32c", 0);
3455 if (!conn
->data_tx_tfm
)
3457 if (!conn
->data_rx_tfm
)
3459 crypto_alloc_tfm("crc32c", 0);
3460 if (!conn
->data_rx_tfm
) {
3461 crypto_free_tfm(conn
->data_tx_tfm
);
3465 if (conn
->data_tx_tfm
)
3466 crypto_free_tfm(conn
->data_tx_tfm
);
3467 if (conn
->data_rx_tfm
)
3468 crypto_free_tfm(conn
->data_rx_tfm
);
3471 case ISCSI_PARAM_INITIAL_R2T_EN
:
3472 session
->initial_r2t_en
= value
;
3474 case ISCSI_PARAM_MAX_R2T
:
3475 if (session
->max_r2t
== roundup_pow_of_two(value
))
3477 iscsi_r2tpool_free(session
);
3478 session
->max_r2t
= value
;
3479 if (session
->max_r2t
& (session
->max_r2t
- 1))
3480 session
->max_r2t
= roundup_pow_of_two(session
->max_r2t
);
3481 if (iscsi_r2tpool_alloc(session
))
3484 case ISCSI_PARAM_IMM_DATA_EN
:
3485 session
->imm_data_en
= value
;
3487 case ISCSI_PARAM_FIRST_BURST
:
3488 session
->first_burst
= value
;
3490 case ISCSI_PARAM_MAX_BURST
:
3491 session
->max_burst
= value
;
3493 case ISCSI_PARAM_PDU_INORDER_EN
:
3494 session
->pdu_inorder_en
= value
;
3496 case ISCSI_PARAM_DATASEQ_INORDER_EN
:
3497 session
->dataseq_inorder_en
= value
;
3499 case ISCSI_PARAM_ERL
:
3500 session
->erl
= value
;
3502 case ISCSI_PARAM_IFMARKER_EN
:
3504 session
->ifmarker_en
= value
;
3506 case ISCSI_PARAM_OFMARKER_EN
:
3508 session
->ofmarker_en
= value
;
3518 iscsi_conn_get_param(iscsi_connh_t connh
, enum iscsi_param param
,
3521 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
3522 struct iscsi_session
*session
= conn
->session
;
3525 case ISCSI_PARAM_MAX_RECV_DLENGTH
:
3526 *value
= conn
->max_recv_dlength
;
3528 case ISCSI_PARAM_MAX_XMIT_DLENGTH
:
3529 *value
= conn
->max_xmit_dlength
;
3531 case ISCSI_PARAM_HDRDGST_EN
:
3532 *value
= conn
->hdrdgst_en
;
3534 case ISCSI_PARAM_DATADGST_EN
:
3535 *value
= conn
->datadgst_en
;
3537 case ISCSI_PARAM_INITIAL_R2T_EN
:
3538 *value
= session
->initial_r2t_en
;
3540 case ISCSI_PARAM_MAX_R2T
:
3541 *value
= session
->max_r2t
;
3543 case ISCSI_PARAM_IMM_DATA_EN
:
3544 *value
= session
->imm_data_en
;
3546 case ISCSI_PARAM_FIRST_BURST
:
3547 *value
= session
->first_burst
;
3549 case ISCSI_PARAM_MAX_BURST
:
3550 *value
= session
->max_burst
;
3552 case ISCSI_PARAM_PDU_INORDER_EN
:
3553 *value
= session
->pdu_inorder_en
;
3555 case ISCSI_PARAM_DATASEQ_INORDER_EN
:
3556 *value
= session
->dataseq_inorder_en
;
3558 case ISCSI_PARAM_ERL
:
3559 *value
= session
->erl
;
3561 case ISCSI_PARAM_IFMARKER_EN
:
3562 *value
= session
->ifmarker_en
;
3564 case ISCSI_PARAM_OFMARKER_EN
:
3565 *value
= session
->ofmarker_en
;
3568 return ISCSI_ERR_PARAM_NOT_FOUND
;
3575 iscsi_conn_get_stats(iscsi_connh_t connh
, struct iscsi_stats
*stats
)
3577 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
3579 stats
->txdata_octets
= conn
->txdata_octets
;
3580 stats
->rxdata_octets
= conn
->rxdata_octets
;
3581 stats
->scsicmd_pdus
= conn
->scsicmd_pdus_cnt
;
3582 stats
->dataout_pdus
= conn
->dataout_pdus_cnt
;
3583 stats
->scsirsp_pdus
= conn
->scsirsp_pdus_cnt
;
3584 stats
->datain_pdus
= conn
->datain_pdus_cnt
;
3585 stats
->r2t_pdus
= conn
->r2t_pdus_cnt
;
3586 stats
->tmfcmd_pdus
= conn
->tmfcmd_pdus_cnt
;
3587 stats
->tmfrsp_pdus
= conn
->tmfrsp_pdus_cnt
;
3588 stats
->custom_length
= 3;
3589 strcpy(stats
->custom
[0].desc
, "tx_sendpage_failures");
3590 stats
->custom
[0].value
= conn
->sendpage_failures_cnt
;
3591 strcpy(stats
->custom
[1].desc
, "rx_discontiguous_hdr");
3592 stats
->custom
[1].value
= conn
->discontiguous_hdr_cnt
;
3593 strcpy(stats
->custom
[2].desc
, "eh_abort_cnt");
3594 stats
->custom
[2].value
= conn
->eh_abort_cnt
;
3598 iscsi_conn_send_pdu(iscsi_connh_t connh
, struct iscsi_hdr
*hdr
, char *data
,
3601 struct iscsi_conn
*conn
= iscsi_ptr(connh
);
3604 down(&conn
->xmitsema
);
3605 rc
= iscsi_conn_send_generic(conn
, hdr
, data
, data_size
);
3606 up(&conn
->xmitsema
);
/*
 * iSCSI transport registration table wiring this driver's session and
 * connection operations into the scsi_transport_iscsi class.
 * NOTE(review): mangled extraction — lines 3613/3615 (likely .name and the
 * caps continuation) and the closing brace are missing from view.
 */
3611 static struct iscsi_transport iscsi_tcp_transport
= {
3612 .owner
= THIS_MODULE
,
3614 .caps
= CAP_RECOVERY_L0
| CAP_MULTI_R2T
| CAP_HDRDGST
3616 .host_template
= &iscsi_sht
,
3617 .hostdata_size
= sizeof(struct iscsi_session
),
3619 .max_cmd_len
= ISCSI_TCP_MAX_CMD_LEN
,
/* session/connection lifecycle callbacks defined in this file */
3620 .create_session
= iscsi_session_create
,
3621 .destroy_session
= iscsi_session_destroy
,
3622 .create_conn
= iscsi_conn_create
,
3623 .bind_conn
= iscsi_conn_bind
,
3624 .destroy_conn
= iscsi_conn_destroy
,
3625 .set_param
= iscsi_conn_set_param
,
3626 .get_param
= iscsi_conn_get_param
,
3627 .start_conn
= iscsi_conn_start
,
3628 .stop_conn
= iscsi_conn_stop
,
3629 .send_pdu
= iscsi_conn_send_pdu
,
3630 .get_stats
= iscsi_conn_get_stats
,
3634 iscsi_tcp_init(void)
3638 if (iscsi_max_lun
< 1) {
3639 printk(KERN_ERR
"Invalid max_lun value of %u\n", iscsi_max_lun
);
3642 iscsi_tcp_transport
.max_lun
= iscsi_max_lun
;
3644 taskcache
= kmem_cache_create("iscsi_taskcache",
3645 sizeof(struct iscsi_data_task
), 0,
3646 SLAB_HWCACHE_ALIGN
| SLAB_NO_REAP
, NULL
, NULL
);
3650 error
= iscsi_register_transport(&iscsi_tcp_transport
);
3652 kmem_cache_destroy(taskcache
);
3658 iscsi_tcp_exit(void)
3660 iscsi_unregister_transport(&iscsi_tcp_transport
);
3661 kmem_cache_destroy(taskcache
);
3664 module_init(iscsi_tcp_init
);
3665 module_exit(iscsi_tcp_exit
);