/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 Mike Christie
 * maintained by open-iscsi@googlegroups.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_request.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include "iscsi_tcp.h"
MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
              "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");
MODULE_VERSION("0:4.445");
/* #define DEBUG_TCP */
/* #define DEBUG_SCSI */

#ifdef DEBUG_TCP
#define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
#else
#define debug_tcp(fmt...)
#endif

#ifdef DEBUG_SCSI
#define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
#else
#define debug_scsi(fmt...)
#endif

#define INVALID_SN_DELTA	0xffff
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);

static kmem_cache_t *taskcache;
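
/*
 * iscsi_buf setup helpers: wrap a kernel virtual buffer or a scatterlist
 * entry in an iscsi_buf so the xmit path can hand it to sendpage()/sendmsg().
 */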
static inline void
iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size)
{
    sg_init_one(&ibuf->sg, (u8 *)vbuf, size);
    ibuf->sent = 0;
    ibuf->use_sendmsg = 0;
}
static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
    ibuf->sg.page = virt_to_page(vbuf);
    ibuf->sg.offset = offset_in_page(vbuf);
    ibuf->sg.length = size;
    ibuf->sent = 0;
    ibuf->use_sendmsg = 1;
}
static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
    ibuf->sg.page = sg->page;
    ibuf->sg.offset = sg->offset;
    ibuf->sg.length = sg->length;
    /*
     * Fastpath: sg element fits into single page
     */
    if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
        ibuf->use_sendmsg = 0;
    else
        ibuf->use_sendmsg = 1;
    ibuf->sent = 0;
}
static inline int
iscsi_buf_left(struct iscsi_buf *ibuf)
{
    int rc;

    rc = ibuf->sg.length - ibuf->sent;
    return rc;
}
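
/*
 * Append the header digest for the PDU held in @buf: digest the header
 * scatterlist into @crc and extend the buffer length so the digest is
 * transmitted together with the header.
 */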
static inline void
iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
                 u8 *crc)
{
    crypto_digest_digest(conn->tx_tfm, &buf->sg, 1, crc);
    buf->sg.length += sizeof(uint32_t);
}
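
/*
 * Mark the session as failed (if this is the only or the leading
 * connection), suspend Tx and Rx on this connection and notify the
 * iSCSI transport class of the error.
 */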
static void
iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
{
    struct iscsi_session *session = conn->session;
    unsigned long flags;

    spin_lock_irqsave(&session->lock, flags);
    if (session->conn_cnt == 1 || session->leadconn == conn)
        session->state = ISCSI_STATE_FAILED;
    spin_unlock_irqrestore(&session->lock, flags);
    set_bit(SUSPEND_BIT, &conn->suspend_tx);
    set_bit(SUSPEND_BIT, &conn->suspend_rx);
    iscsi_conn_error(conn->cls_conn, err);
}
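
/*
 * Validate MaxCmdSN/ExpCmdSN carried by an incoming PDU (allowing for
 * sequence-number wrap within INVALID_SN_DELTA) and update the session's
 * command window.
 */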
static inline int
iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
{
    uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
    uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);

    if (max_cmdsn < exp_cmdsn - 1 &&
        max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
        return ISCSI_ERR_MAX_CMDSN;
    if (max_cmdsn > session->max_cmdsn ||
        max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
        session->max_cmdsn = max_cmdsn;
    if (exp_cmdsn > session->exp_cmdsn ||
        exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
        session->exp_cmdsn = exp_cmdsn;

    return 0;
}
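
/*
 * Extract the PDU header from the in-progress skb.  In the common case the
 * header is referenced in place (zero-copy); if it is spread over skb
 * fragments it is gathered piecewise into conn->hdr, possibly across
 * several calls.
 */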
static int
iscsi_hdr_extract(struct iscsi_conn *conn)
{
    struct sk_buff *skb = conn->in.skb;

    if (conn->in.copy >= conn->hdr_size &&
        conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
        /*
         * Zero-copy PDU Header: using connection context
         * to store header pointer.
         */
        if (skb_shinfo(skb)->frag_list == NULL &&
            !skb_shinfo(skb)->nr_frags)
            conn->in.hdr = (struct iscsi_hdr *)
                ((char*)skb->data + conn->in.offset);
        else {
            /* ignoring return code since we checked in.copy before */
            skb_copy_bits(skb, conn->in.offset,
                          &conn->hdr, conn->hdr_size);
            conn->in.hdr = &conn->hdr;
        }
        conn->in.offset += conn->hdr_size;
        conn->in.copy -= conn->hdr_size;
    } else {
        int hdr_remains;
        int copylen;

        /*
         * PDU header scattered across SKB's,
         * copying it... This'll happen quite rarely.
         */
        if (conn->in_progress == IN_PROGRESS_WAIT_HEADER)
            conn->in.hdr_offset = 0;

        hdr_remains = conn->hdr_size - conn->in.hdr_offset;
        BUG_ON(hdr_remains <= 0);

        copylen = min(conn->in.copy, hdr_remains);
        skb_copy_bits(skb, conn->in.offset,
                      (char*)&conn->hdr + conn->in.hdr_offset, copylen);

        debug_tcp("PDU gather offset %d bytes %d in.offset %d "
                  "in.copy %d\n", conn->in.hdr_offset, copylen,
                  conn->in.offset, conn->in.copy);

        conn->in.offset += copylen;
        conn->in.copy -= copylen;
        if (copylen < hdr_remains) {
            conn->in_progress = IN_PROGRESS_HEADER_GATHER;
            conn->in.hdr_offset += copylen;
            return -EAGAIN;
        }
        conn->in.hdr = &conn->hdr;
        conn->discontiguous_hdr_cnt++;
        conn->in_progress = IN_PROGRESS_WAIT_HEADER;
    }

    return 0;
}
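
/*
 * Release a command task: for writes, free any queued Data-Out descriptors,
 * then reset the xmit state and return the task to the session's command
 * pool.
 */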
static void
iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct scsi_cmnd *sc = ctask->sc;
    struct iscsi_session *session = conn->session;

    spin_lock(&session->lock);
    if (unlikely(!sc)) {
        spin_unlock(&session->lock);
        return;
    }

    if (sc->sc_data_direction == DMA_TO_DEVICE) {
        struct iscsi_data_task *dtask, *n;
        /* WRITE: cleanup Data-Out's if any */
        list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
            list_del(&dtask->item);
            mempool_free(dtask, ctask->datapool);
        }
    }
    ctask->xmstate = XMSTATE_IDLE;
    ctask->sc = NULL;
    __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
    spin_unlock(&session->lock);
}
/**
 * iscsi_cmd_rsp - SCSI Command Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    int rc;
    struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr;
    struct iscsi_session *session = conn->session;
    struct scsi_cmnd *sc = ctask->sc;

    rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
    if (rc) {
        sc->result = (DID_ERROR << 16);
        goto out;
    }

    conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;

    sc->result = (DID_OK << 16) | rhdr->cmd_status;

    if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
        sc->result = (DID_ERROR << 16);
        goto out;
    }

    if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) {
        int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE);

        memcpy(sc->sense_buffer, conn->data + 2, sensecopy);
        debug_scsi("copied %d bytes of sense\n", sensecopy);
    }

    if (sc->sc_data_direction == DMA_TO_DEVICE)
        goto out;

    if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
        int res_count = be32_to_cpu(rhdr->residual_count);

        if (res_count > 0 && res_count <= sc->request_bufflen)
            sc->resid = res_count;
        else
            sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
    } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
        sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
    else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
        sc->resid = be32_to_cpu(rhdr->residual_count);

out:
    debug_scsi("done [sc %lx res %d itt 0x%x]\n",
               (long)sc, sc->result, ctask->itt);
    conn->scsirsp_pdus_cnt++;
    iscsi_ctask_cleanup(conn, ctask);
    sc->scsi_done(sc);
    return rc;
}
/**
 * iscsi_data_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    int rc;
    struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr;
    struct iscsi_session *session = conn->session;
    int datasn = be32_to_cpu(rhdr->datasn);

    rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
    if (rc)
        return rc;
    /*
     * setup Data-In byte counter (gets decremented..)
     */
    ctask->data_count = conn->in.datalen;

    if (conn->in.datalen == 0)
        return 0;

    if (ctask->datasn != datasn)
        return ISCSI_ERR_DATASN;

    ctask->datasn++;

    ctask->data_offset = be32_to_cpu(rhdr->offset);
    if (ctask->data_offset + conn->in.datalen > ctask->total_length)
        return ISCSI_ERR_DATA_OFFSET;

    if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
        struct scsi_cmnd *sc = ctask->sc;

        conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
        if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
            int res_count = be32_to_cpu(rhdr->residual_count);

            if (res_count > 0 &&
                res_count <= sc->request_bufflen) {
                sc->resid = res_count;
                sc->result = (DID_OK << 16) | rhdr->cmd_status;
            } else
                sc->result = (DID_BAD_TARGET << 16) |
                             rhdr->cmd_status;
        } else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
            sc->resid = be32_to_cpu(rhdr->residual_count);
            sc->result = (DID_OK << 16) | rhdr->cmd_status;
        } else
            sc->result = (DID_OK << 16) | rhdr->cmd_status;
    }

    conn->datain_pdus_cnt++;
    return 0;
}
/**
 * iscsi_solicit_data_init - initialize first Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 *
 * Notes:
 *	Initialize first Data-Out within this R2T sequence and find
 *	the proper data_offset within this SCSI command.
 *
 *	This function is called with connection lock taken.
 **/
static void
iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                        struct iscsi_r2t_info *r2t)
{
    struct iscsi_data *hdr;
    struct iscsi_data_task *dtask;
    struct scsi_cmnd *sc = ctask->sc;

    dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
    BUG_ON(!dtask);
    hdr = &dtask->hdr;
    memset(hdr, 0, sizeof(struct iscsi_data));
    hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
    r2t->solicit_datasn++;
    hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
    memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
    hdr->itt = ctask->hdr.itt;
    hdr->exp_statsn = r2t->exp_statsn;
    hdr->offset = cpu_to_be32(r2t->data_offset);
    if (r2t->data_length > conn->max_xmit_dlength) {
        hton24(hdr->dlength, conn->max_xmit_dlength);
        r2t->data_count = conn->max_xmit_dlength;
    } else {
        hton24(hdr->dlength, r2t->data_length);
        r2t->data_count = r2t->data_length;
        hdr->flags = ISCSI_FLAG_CMD_FINAL;
    }
    conn->dataout_pdus_cnt++;

    r2t->sent = 0;

    iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
                        sizeof(struct iscsi_hdr));

    if (sc->use_sg) {
        int i, sg_count = 0;
        struct scatterlist *sg = sc->request_buffer;

        r2t->sg = NULL;
        for (i = 0; i < sc->use_sg; i++, sg += 1) {
            /* FIXME: prefetch ? */
            if (sg_count + sg->length > r2t->data_offset) {
                int page_offset;

                /* offset within this page */
                page_offset = r2t->data_offset - sg_count;

                /* fill in this buffer */
                iscsi_buf_init_sg(&r2t->sendbuf, sg);
                r2t->sendbuf.sg.offset += page_offset;
                r2t->sendbuf.sg.length -= page_offset;

                /* xmit logic will continue with next one */
                r2t->sg = sg + 1;
                break;
            }
            sg_count += sg->length;
        }
        BUG_ON(r2t->sg == NULL);
    } else
        iscsi_buf_init_iov(&ctask->sendbuf,
                           (char*)sc->request_buffer + r2t->data_offset,
                           r2t->data_count);

    list_add(&dtask->item, &ctask->dataqueue);

    r2t->dtask = dtask;
}
/**
 * iscsi_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct iscsi_r2t_info *r2t;
    struct iscsi_session *session = conn->session;
    struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr;
    int r2tsn = be32_to_cpu(rhdr->r2tsn);
    int rc;

    if (conn->in.ahslen)
        return ISCSI_ERR_AHSLEN;

    if (conn->in.datalen)
        return ISCSI_ERR_DATALEN;

    if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn)
        return ISCSI_ERR_R2TSN;

    rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
    if (rc)
        return rc;

    /* FIXME: use R2TSN to detect missing R2T */

    /* fill-in new R2T associated with the task */
    spin_lock(&session->lock);
    if (!ctask->sc || ctask->mtask ||
        session->state != ISCSI_STATE_LOGGED_IN) {
        printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
               "recovery...\n", ctask->itt);
        spin_unlock(&session->lock);
        return 0;
    }
    rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
    BUG_ON(!rc);

    r2t->exp_statsn = rhdr->statsn;
    r2t->data_length = be32_to_cpu(rhdr->data_length);
    if (r2t->data_length == 0 ||
        r2t->data_length > session->max_burst) {
        spin_unlock(&session->lock);
        return ISCSI_ERR_DATALEN;
    }

    r2t->data_offset = be32_to_cpu(rhdr->data_offset);
    if (r2t->data_offset + r2t->data_length > ctask->total_length) {
        spin_unlock(&session->lock);
        return ISCSI_ERR_DATALEN;
    }

    r2t->ttt = rhdr->ttt; /* no flip */
    r2t->solicit_datasn = 0;

    iscsi_solicit_data_init(conn, ctask, r2t);

    ctask->exp_r2tsn = r2tsn + 1;
    ctask->xmstate |= XMSTATE_SOL_HDR;
    __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*));
    __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*));

    scsi_queue_work(session->host, &conn->xmitwork);
    conn->r2t_pdus_cnt++;
    spin_unlock(&session->lock);

    return 0;
}
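
/*
 * Verify and dispatch a received PDU header: check data length, AHS,
 * padding and header digest, validate the itt, then route the PDU to the
 * command-task, management-task or reserved-tag handling paths.
 */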
static int
iscsi_hdr_recv(struct iscsi_conn *conn)
{
    int rc = 0;
    struct iscsi_hdr *hdr;
    struct iscsi_cmd_task *ctask;
    struct iscsi_session *session = conn->session;
    uint32_t cdgst, rdgst = 0;

    hdr = conn->in.hdr;

    /* verify PDU length */
    conn->in.datalen = ntoh24(hdr->dlength);
    if (conn->in.datalen > conn->max_recv_dlength) {
        printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
               conn->in.datalen, conn->max_recv_dlength);
        return ISCSI_ERR_DATALEN;
    }
    conn->data_copied = 0;

    /* read AHS */
    conn->in.ahslen = hdr->hlength * 4;
    conn->in.offset += conn->in.ahslen;
    conn->in.copy -= conn->in.ahslen;
    if (conn->in.copy < 0) {
        printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
               "%d bytes\n", conn->in.ahslen);
        return ISCSI_ERR_AHSLEN;
    }

    /* calculate read padding */
    conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1);
    if (conn->in.padding) {
        conn->in.padding = ISCSI_PAD_LEN - conn->in.padding;
        debug_scsi("read padding %d bytes\n", conn->in.padding);
    }

    if (conn->hdrdgst_en) {
        struct scatterlist sg;

        sg_init_one(&sg, (u8 *)hdr,
                    sizeof(struct iscsi_hdr) + conn->in.ahslen);
        crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
        rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
                             conn->in.ahslen);
        if (cdgst != rdgst) {
            printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
                   "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
                   cdgst);
            return ISCSI_ERR_HDR_DGST;
        }
    }

    /* save opcode for later */
    conn->in.opcode = hdr->opcode & ISCSI_OPCODE_MASK;

    /* verify itt (itt encoding: age+cid+itt) */
    if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
        if ((hdr->itt & AGE_MASK) !=
            (session->age << AGE_SHIFT)) {
            printk(KERN_ERR "iscsi_tcp: received itt %x expected "
                   "session age (%x)\n", hdr->itt,
                   session->age & AGE_MASK);
            return ISCSI_ERR_BAD_ITT;
        }

        if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) {
            printk(KERN_ERR "iscsi_tcp: received itt %x, expected "
                   "CID (%x)\n", hdr->itt, conn->id);
            return ISCSI_ERR_BAD_ITT;
        }
        conn->in.itt = hdr->itt & ITT_MASK;
    } else
        conn->in.itt = hdr->itt;

    debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
              hdr->opcode, conn->in.offset, conn->in.copy,
              conn->in.ahslen, conn->in.datalen);

    if (conn->in.itt < session->cmds_max) {
        ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];

        if (!ctask->sc) {
            printk(KERN_INFO "iscsi_tcp: dropping ctask with "
                   "itt 0x%x\n", ctask->itt);
            conn->in.datalen = 0; /* force drop */
            return 0;
        }

        if (ctask->sc->SCp.phase != session->age) {
            printk(KERN_ERR "iscsi_tcp: ctask's session age %d, "
                   "expected %d\n", ctask->sc->SCp.phase,
                   session->age);
            return ISCSI_ERR_SESSION_FAILED;
        }

        conn->in.ctask = ctask;

        debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
                   hdr->opcode, conn->id, (long)ctask->sc,
                   ctask->itt, conn->in.datalen);

        switch(conn->in.opcode) {
        case ISCSI_OP_SCSI_CMD_RSP:
            BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
            if (!conn->in.datalen)
                rc = iscsi_cmd_rsp(conn, ctask);
            else
                /*
                 * got sense or response data; copying PDU
                 * Header to the connection's header
                 */
                memcpy(&conn->hdr, hdr,
                       sizeof(struct iscsi_hdr));
            break;
        case ISCSI_OP_SCSI_DATA_IN:
            BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
            /* save flags for non-exceptional status */
            conn->in.flags = hdr->flags;
            /* save cmd_status for sense data */
            conn->in.cmd_status =
                ((struct iscsi_data_rsp*)hdr)->cmd_status;
            rc = iscsi_data_rsp(conn, ctask);
            break;
        case ISCSI_OP_R2T:
            BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
            if (ctask->sc->sc_data_direction == DMA_TO_DEVICE)
                rc = iscsi_r2t_rsp(conn, ctask);
            else
                rc = ISCSI_ERR_PROTO;
            break;
        default:
            rc = ISCSI_ERR_BAD_OPCODE;
            break;
        }
    } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET &&
               conn->in.itt < ISCSI_MGMT_ITT_OFFSET +
                              session->mgmtpool_max) {
        struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *)
                session->mgmt_cmds[conn->in.itt -
                                   ISCSI_MGMT_ITT_OFFSET];

        debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
                   conn->in.opcode, conn->id, mtask->itt,
                   conn->in.datalen);

        switch(conn->in.opcode) {
        case ISCSI_OP_LOGIN_RSP:
        case ISCSI_OP_TEXT_RSP:
        case ISCSI_OP_LOGOUT_RSP:
            rc = iscsi_check_assign_cmdsn(session,
                                          (struct iscsi_nopin*)hdr);
            if (rc)
                break;

            if (!conn->in.datalen) {
                rc = iscsi_recv_pdu(conn->cls_conn, hdr,
                                    NULL, 0);
                if (conn->login_mtask != mtask) {
                    spin_lock(&session->lock);
                    __kfifo_put(session->mgmtpool.queue,
                                (void*)&mtask, sizeof(void*));
                    spin_unlock(&session->lock);
                }
            }
            break;
        case ISCSI_OP_SCSI_TMFUNC_RSP:
            rc = iscsi_check_assign_cmdsn(session,
                                          (struct iscsi_nopin*)hdr);
            if (rc)
                break;

            if (conn->in.datalen || conn->in.ahslen) {
                rc = ISCSI_ERR_PROTO;
                break;
            }
            conn->tmfrsp_pdus_cnt++;
            spin_lock(&session->lock);
            if (conn->tmabort_state == TMABORT_INITIAL) {
                __kfifo_put(session->mgmtpool.queue,
                            (void*)&mtask, sizeof(void*));
                conn->tmabort_state =
                    ((struct iscsi_tm_rsp *)hdr)->
                    response == ISCSI_TMF_RSP_COMPLETE ?
                        TMABORT_SUCCESS:TMABORT_FAILED;
                /* unblock eh_abort() */
                wake_up(&conn->ehwait);
            }
            spin_unlock(&session->lock);
            break;
        case ISCSI_OP_NOOP_IN:
            if (hdr->ttt != ISCSI_RESERVED_TAG) {
                rc = ISCSI_ERR_PROTO;
                break;
            }
            rc = iscsi_check_assign_cmdsn(session,
                                          (struct iscsi_nopin*)hdr);
            if (rc)
                break;
            conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;

            if (!conn->in.datalen) {
                struct iscsi_mgmt_task *mtask;

                rc = iscsi_recv_pdu(conn->cls_conn, hdr,
                                    NULL, 0);
                mtask = (struct iscsi_mgmt_task *)
                        session->mgmt_cmds[conn->in.itt -
                                           ISCSI_MGMT_ITT_OFFSET];
                if (conn->login_mtask != mtask) {
                    spin_lock(&session->lock);
                    __kfifo_put(session->mgmtpool.queue,
                                (void*)&mtask, sizeof(void*));
                    spin_unlock(&session->lock);
                }
            }
            break;
        default:
            rc = ISCSI_ERR_BAD_OPCODE;
            break;
        }
    } else if (conn->in.itt == ISCSI_RESERVED_TAG) {
        switch(conn->in.opcode) {
        case ISCSI_OP_NOOP_IN:
            if (!conn->in.datalen) {
                rc = iscsi_check_assign_cmdsn(session,
                                              (struct iscsi_nopin*)hdr);
                if (!rc && hdr->ttt != ISCSI_RESERVED_TAG)
                    rc = iscsi_recv_pdu(conn->cls_conn,
                                        hdr, NULL, 0);
            } else
                rc = ISCSI_ERR_PROTO;
            break;
        case ISCSI_OP_REJECT:
            /* we need sth like iscsi_reject_rsp()*/
        case ISCSI_OP_ASYNC_EVENT:
            /* we need sth like iscsi_async_event_rsp() */
            rc = ISCSI_ERR_BAD_OPCODE;
            break;
        default:
            rc = ISCSI_ERR_BAD_OPCODE;
            break;
        }
    } else
        rc = ISCSI_ERR_BAD_ITT;

    return rc;
}
/**
 * iscsi_ctask_copy - copy skb bits to the destination cmd task
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @buf: buffer to copy to
 * @buf_size: size of buffer
 * @offset: offset within the buffer
 *
 * Notes:
 *	The function calls skb_copy_bits() and updates per-connection and
 *	per-cmd byte counters.
 *
 *	Read counters (in bytes):
 *
 *	conn->in.offset		offset within in progress SKB
 *	conn->in.copy		left to copy from in progress SKB
 *	conn->in.copied		copied already from in progress SKB
 *	conn->data_copied	copied already from in progress buffer
 *	ctask->sent		total bytes sent up to the MidLayer
 *	ctask->data_count	left to copy from in progress Data-In
 *	buf_left		left to copy from in progress buffer
 **/
static inline int
iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                 void *buf, int buf_size, int offset)
{
    int buf_left = buf_size - (conn->data_copied + offset);
    int size = min(conn->in.copy, buf_left);
    int rc;

    size = min(size, ctask->data_count);

    debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
              size, conn->in.offset, conn->in.copied);

    BUG_ON(ctask->sent + size > ctask->total_length);

    rc = skb_copy_bits(conn->in.skb, conn->in.offset,
                       (char*)buf + (offset + conn->data_copied), size);
    /* must fit into skb->len */
    BUG_ON(rc);

    conn->in.offset += size;
    conn->in.copy -= size;
    conn->in.copied += size;
    conn->data_copied += size;
    ctask->sent += size;
    ctask->data_count -= size;

    BUG_ON(conn->in.copy < 0);
    BUG_ON(ctask->data_count < 0);

    if (buf_size != (conn->data_copied + offset)) {
        if (!ctask->data_count) {
            BUG_ON(buf_size - conn->data_copied < 0);
            /* done with this PDU */
            return buf_size - conn->data_copied;
        }
        return -EAGAIN;
    }

    /* done with this buffer or with both - PDU and buffer */
    conn->data_copied = 0;
    return 0;
}
/**
 * iscsi_tcp_copy - copy skb bits to the destination buffer
 * @conn: iscsi connection
 * @buf: buffer to copy to
 * @buf_size: number of bytes to copy
 *
 * Notes:
 *	The function calls skb_copy_bits() and updates per-connection
 *	byte counters.
 **/
static inline int
iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size)
{
    int buf_left = buf_size - conn->data_copied;
    int size = min(conn->in.copy, buf_left);
    int rc;

    debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
              size, conn->in.offset, conn->data_copied);

    rc = skb_copy_bits(conn->in.skb, conn->in.offset,
                       (char*)buf + conn->data_copied, size);
    BUG_ON(rc);

    conn->in.offset += size;
    conn->in.copy -= size;
    conn->in.copied += size;
    conn->data_copied += size;

    if (buf_size != conn->data_copied)
        return -EAGAIN;

    return 0;
}
static inline void
partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
                         int offset, int length)
{
    struct scatterlist temp;

    memcpy(&temp, sg, sizeof(struct scatterlist));
    temp.offset = offset;
    temp.length = length;
    crypto_digest_update(conn->data_rx_tfm, &temp, 1);
}

static void
iscsi_recv_digest_update(struct iscsi_conn *conn, char* buf, int len)
{
    struct scatterlist tmp;

    sg_init_one(&tmp, buf, len);
    crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
}
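
/*
 * Copy a received Data-In segment into the Scsi_Cmnd buffer (flat buffer
 * or scatter-gather list), updating the receive data digest as segments
 * complete, and finish the command if the PDU carried status.
 */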
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
{
    struct iscsi_cmd_task *ctask = conn->in.ctask;
    struct scsi_cmnd *sc = ctask->sc;
    struct scatterlist *sg;
    int i, offset, rc = 0;

    BUG_ON((void*)ctask != sc->SCp.ptr);

    /*
     * copying Data-In into the Scsi_Cmnd
     */
    if (!sc->use_sg) {
        i = ctask->data_count;
        rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer,
                              sc->request_bufflen, ctask->data_offset);
        if (rc == -EAGAIN)
            return rc;
        if (conn->datadgst_en)
            iscsi_recv_digest_update(conn, sc->request_buffer, i);
        rc = 0;
        goto done;
    }

    offset = ctask->data_offset;
    sg = sc->request_buffer;

    if (ctask->data_offset)
        for (i = 0; i < ctask->sg_count; i++)
            offset -= sg[i].length;
    /* we've passed through partial sg*/
    if (offset < 0)
        offset = 0;

    for (i = ctask->sg_count; i < sc->use_sg; i++) {
        char *dest;

        dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
        rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset,
                              sg[i].length, offset);
        kunmap_atomic(dest, KM_SOFTIRQ0);
        if (rc == -EAGAIN)
            /* continue with the next SKB/PDU */
            return rc;
        if (!rc) {
            if (conn->datadgst_en) {
                if (!offset)
                    crypto_digest_update(conn->data_rx_tfm,
                                         &sg[i], 1);
                else
                    partial_sg_digest_update(conn, &sg[i],
                                             sg[i].offset + offset,
                                             sg[i].length - offset);
            }
            offset = 0;
            ctask->sg_count++;
        }

        if (!ctask->data_count) {
            if (rc && conn->datadgst_en)
                /*
                 * data-in is complete, but buffer not...
                 */
                partial_sg_digest_update(conn, &sg[i],
                                         sg[i].offset, sg[i].length-rc);
            rc = 0;
            break;
        }

        if (!conn->in.copy)
            return -EAGAIN;
    }
    BUG_ON(ctask->data_count);

done:
    /* check for non-exceptional status */
    if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) {
        debug_scsi("done [sc %lx res %d itt 0x%x]\n",
                   (long)sc, sc->result, ctask->itt);
        conn->scsirsp_pdus_cnt++;
        iscsi_ctask_cleanup(conn, ctask);
        sc->scsi_done(sc);
    }

    return rc;
}
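
/*
 * Receive the data segment of the PDU whose header was already processed:
 * Data-In goes to the SCSI buffers, sense and control-plane data are
 * collected into the connection's data placeholder.
 */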
static int
iscsi_data_recv(struct iscsi_conn *conn)
{
    struct iscsi_session *session = conn->session;
    int rc = 0;

    switch(conn->in.opcode) {
    case ISCSI_OP_SCSI_DATA_IN:
        rc = iscsi_scsi_data_in(conn);
        break;
    case ISCSI_OP_SCSI_CMD_RSP: {
        /*
         * copying the entire Data Segment.
         */
        if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
            rc = -EAGAIN;
            goto exit;
        }

        conn->in.hdr = &conn->hdr;
        conn->senselen = (conn->data[0] << 8) | conn->data[1];
        rc = iscsi_cmd_rsp(conn, conn->in.ctask);
        if (!rc && conn->datadgst_en)
            iscsi_recv_digest_update(conn, conn->data,
                                     conn->in.datalen);
    }
    break;
    case ISCSI_OP_TEXT_RSP:
    case ISCSI_OP_LOGIN_RSP:
    case ISCSI_OP_NOOP_IN: {
        struct iscsi_mgmt_task *mtask = NULL;

        if (conn->in.itt != ISCSI_RESERVED_TAG)
            mtask = (struct iscsi_mgmt_task *)
                session->mgmt_cmds[conn->in.itt -
                                   ISCSI_MGMT_ITT_OFFSET];

        /*
         * Collect data segment to the connection's data
         * placeholder
         */
        if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
            rc = -EAGAIN;
            goto exit;
        }

        rc = iscsi_recv_pdu(conn->cls_conn, conn->in.hdr,
                            conn->data, conn->in.datalen);

        if (!rc && conn->datadgst_en &&
            conn->in.opcode != ISCSI_OP_LOGIN_RSP)
            iscsi_recv_digest_update(conn, conn->data,
                                     conn->in.datalen);

        if (mtask && conn->login_mtask != mtask) {
            spin_lock(&session->lock);
            __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
                        sizeof(void*));
            spin_unlock(&session->lock);
        }
    }
    break;
    case ISCSI_OP_ASYNC_EVENT:
    case ISCSI_OP_REJECT:
        break;
    default:
        BUG_ON(1);
    }
exit:
    return rc;
}
/**
 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 **/
static int
iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                    unsigned int offset, size_t len)
{
    int rc;
    struct iscsi_conn *conn = rd_desc->arg.data;
    int processed;
    char pad[ISCSI_PAD_LEN];
    struct scatterlist sg;

    /*
     * Save current SKB and its offset in the corresponding
     * connection context.
     */
    conn->in.copy = skb->len - offset;
    conn->in.offset = offset;
    conn->in.skb = skb;
    conn->in.len = conn->in.copy;
    BUG_ON(conn->in.copy <= 0);
    debug_tcp("in %d bytes\n", conn->in.copy);

more:
    conn->in.copied = 0;
    rc = 0;

    if (unlikely(conn->suspend_rx)) {
        debug_tcp("conn %d Rx suspended!\n", conn->id);
        return 0;
    }

    if (conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
        conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
        rc = iscsi_hdr_extract(conn);
        if (rc) {
            if (rc == -EAGAIN)
                goto nomore;
            else {
                iscsi_conn_failure(conn, rc);
                return 0;
            }
        }

        /*
         * Verify and process incoming PDU header.
         */
        rc = iscsi_hdr_recv(conn);
        if (!rc && conn->in.datalen) {
            if (conn->datadgst_en) {
                BUG_ON(!conn->data_rx_tfm);
                crypto_digest_init(conn->data_rx_tfm);
            }
            conn->in_progress = IN_PROGRESS_DATA_RECV;
        } else if (rc) {
            iscsi_conn_failure(conn, rc);
            return 0;
        }
    }

    if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
        uint32_t recv_digest;
        debug_tcp("extra data_recv offset %d copy %d\n",
                  conn->in.offset, conn->in.copy);
        skb_copy_bits(conn->in.skb, conn->in.offset,
                      &recv_digest, 4);
        conn->in.offset += 4;
        conn->in.copy -= 4;
        if (recv_digest != conn->in.datadgst) {
            debug_tcp("iscsi_tcp: data digest error!"
                      "0x%x != 0x%x\n", recv_digest,
                      conn->in.datadgst);
            iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
            return 0;
        } else {
            debug_tcp("iscsi_tcp: data digest match!"
                      "0x%x == 0x%x\n", recv_digest,
                      conn->in.datadgst);
            conn->in_progress = IN_PROGRESS_WAIT_HEADER;
        }
    }

    if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) {

        debug_tcp("data_recv offset %d copy %d\n",
                  conn->in.offset, conn->in.copy);

        rc = iscsi_data_recv(conn);
        if (rc) {
            if (rc == -EAGAIN) {
                rd_desc->count = conn->in.datalen -
                                 conn->in.ctask->data_count;
                goto again;
            }
            iscsi_conn_failure(conn, rc);
            return 0;
        }
        conn->in.copy -= conn->in.padding;
        conn->in.offset += conn->in.padding;
        if (conn->datadgst_en) {
            if (conn->in.padding) {
                debug_tcp("padding -> %d\n", conn->in.padding);
                memset(pad, 0, conn->in.padding);
                sg_init_one(&sg, pad, conn->in.padding);
                crypto_digest_update(conn->data_rx_tfm, &sg, 1);
            }
            crypto_digest_final(conn->data_rx_tfm,
                                (u8 *) & conn->in.datadgst);
            debug_tcp("rx digest 0x%x\n", conn->in.datadgst);
            conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
        } else
            conn->in_progress = IN_PROGRESS_WAIT_HEADER;
    }

    debug_tcp("f, processed %d from out of %d padding %d\n",
              conn->in.offset - offset, (int)len, conn->in.padding);
    BUG_ON(conn->in.offset - offset > len);

    if (conn->in.offset - offset != len) {
        debug_tcp("continue to process %d bytes\n",
                  (int)len - (conn->in.offset - offset));
        goto more;
    }

nomore:
    processed = conn->in.offset - offset;
    BUG_ON(processed == 0);
    return processed;

again:
    processed = conn->in.offset - offset;
    debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
              processed, (int)len, (int)rd_desc->count);
    BUG_ON(processed == 0);
    BUG_ON(processed > len);

    conn->rxdata_octets += processed;
    return processed;
}
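
/*
 * sk_data_ready() callback: feed the socket's receive queue to
 * iscsi_tcp_data_recv() via tcp_read_sock().
 */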
static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
    struct iscsi_conn *conn = sk->sk_user_data;
    read_descriptor_t rd_desc;

    read_lock(&sk->sk_callback_lock);

    /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
    rd_desc.arg.data = conn;
    tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);

    read_unlock(&sk->sk_callback_lock);
}
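
/*
 * sk_state_change() callback: treat a TCP close with no pending receive
 * data as a connection failure, then chain to the saved callback.
 */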
static void
iscsi_tcp_state_change(struct sock *sk)
{
    struct iscsi_conn *conn;
    struct iscsi_session *session;
    void (*old_state_change)(struct sock *);

    read_lock(&sk->sk_callback_lock);

    conn = (struct iscsi_conn*)sk->sk_user_data;
    session = conn->session;

    if ((sk->sk_state == TCP_CLOSE_WAIT ||
         sk->sk_state == TCP_CLOSE) &&
        !atomic_read(&sk->sk_rmem_alloc)) {
        debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
        iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
    }

    old_state_change = conn->old_state_change;

    read_unlock(&sk->sk_callback_lock);

    old_state_change(sk);
}
/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void
iscsi_write_space(struct sock *sk)
{
    struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
    conn->old_write_space(sk);
    debug_tcp("iscsi_write_space: cid %d\n", conn->id);
    clear_bit(SUSPEND_BIT, &conn->suspend_tx);
    scsi_queue_work(conn->session->host, &conn->xmitwork);
}
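
/*
 * Hook this connection into the socket: save the original socket callbacks
 * and install the iSCSI data-ready, state-change and write-space handlers.
 */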
static void
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
{
    struct sock *sk = conn->sock->sk;

    /* assign new callbacks */
    write_lock_bh(&sk->sk_callback_lock);
    sk->sk_user_data = conn;
    conn->old_data_ready = sk->sk_data_ready;
    conn->old_state_change = sk->sk_state_change;
    conn->old_write_space = sk->sk_write_space;
    sk->sk_data_ready = iscsi_tcp_data_ready;
    sk->sk_state_change = iscsi_tcp_state_change;
    sk->sk_write_space = iscsi_write_space;
    write_unlock_bh(&sk->sk_callback_lock);
}
static void
iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
{
    struct sock *sk = conn->sock->sk;

    /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
    write_lock_bh(&sk->sk_callback_lock);
    sk->sk_user_data    = NULL;
    sk->sk_data_ready   = conn->old_data_ready;
    sk->sk_state_change = conn->old_state_change;
    sk->sk_write_space  = conn->old_write_space;
    sk->sk_no_check     = 0;
    write_unlock_bh(&sk->sk_callback_lock);
}
/**
 * iscsi_send - generic send routine
 * @sk: kernel's socket
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 **/
static inline int
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
{
    struct socket *sk = conn->sock;
    int offset = buf->sg.offset + buf->sent;

    /*
     * if we got use_sg=0 or are sending something we kmallocd
     * then we did not have to do kmap (kmap returns page_address)
     *
     * if we got use_sg > 0, but had to drop down, we do not
     * set clustering so this should only happen for that
     * slab case.
     */
    if (buf->use_sendmsg)
        return sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
    else
        return conn->sendpage(sk, buf->sg.page, offset, size, flags);
}
/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
{
    int flags = 0; /* MSG_DONTWAIT; */
    int res, size;

    size = buf->sg.length - buf->sent;
    BUG_ON(buf->sent + size > buf->sg.length);
    if (buf->sent + size != buf->sg.length || datalen)
        flags |= MSG_MORE;

    res = iscsi_send(conn, buf, size, flags);
    debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
    if (res >= 0) {
        conn->txdata_octets += res;
        buf->sent += res;
        if (size != res)
            return -EAGAIN;
        return 0;
    } else if (res == -EAGAIN) {
        conn->sendpage_failures_cnt++;
        set_bit(SUSPEND_BIT, &conn->suspend_tx);
    } else if (res == -EPIPE)
        iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);

    return res;
}
/**
 * iscsi_sendpage - send one page of iSCSI Data-Out.
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @count: remaining data
 * @sent: number of bytes sent
 *
 * Notes:
 *	(Tx, Fast Path)
 **/
static inline int
iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
               int *count, int *sent)
{
    int flags = 0; /* MSG_DONTWAIT; */
    int res, size;

    size = buf->sg.length - buf->sent;
    BUG_ON(buf->sent + size > buf->sg.length);
    if (size > *count)
        size = *count;
    if (buf->sent + size != buf->sg.length || *count != size)
        flags |= MSG_MORE;

    res = iscsi_send(conn, buf, size, flags);
    debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
              size, buf->sent, *count, *sent, res);
    if (res >= 0) {
        conn->txdata_octets += res;
        buf->sent += res;
        *count -= res;
        *sent += res;
        if (size != res)
            return -EAGAIN;
        return 0;
    } else if (res == -EAGAIN) {
        conn->sendpage_failures_cnt++;
        set_bit(SUSPEND_BIT, &conn->suspend_tx);
    } else if (res == -EPIPE)
        iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);

    return res;
}
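
/*
 * Data digest helpers for the Tx path: (re)initialize the digest transform
 * for a task, and send the final digest value, setting XMSTATE_DATA_DIGEST
 * for a resend if the digest could not be transmitted.
 */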
static inline void
iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    BUG_ON(!conn->data_tx_tfm);
    crypto_digest_init(conn->data_tx_tfm);
    ctask->digest_count = 4;
}

static int
iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                        struct iscsi_buf *buf, uint32_t *digest, int final)
{
    int rc = 0;
    int sent = 0;

    if (final)
        crypto_digest_final(conn->data_tx_tfm, (u8*)digest);

    iscsi_buf_init_virt(buf, (char*)digest, 4);
    rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent);
    if (rc) {
        ctask->datadigest = *digest;
        ctask->xmstate |= XMSTATE_DATA_DIGEST;
    } else
        ctask->digest_count = 4;
    return rc;
}
/**
 * iscsi_solicit_data_cont - initialize next Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @r2t: R2T info
 * @left: bytes left to transfer
 *
 * Notes:
 *	Initialize next Data-Out within this R2T sequence and continue
 *	to process next Scatter-Gather element(if any) of this SCSI command.
 *
 *	Called under connection lock.
 **/
static void
iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
                        struct iscsi_r2t_info *r2t, int left)
{
    struct iscsi_data *hdr;
    struct iscsi_data_task *dtask;
    struct scsi_cmnd *sc = ctask->sc;
    int new_offset;

    dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
    BUG_ON(!dtask);
    hdr = &dtask->hdr;
    memset(hdr, 0, sizeof(struct iscsi_data));
    hdr->ttt = r2t->ttt;
    hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
    r2t->solicit_datasn++;
    hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
    memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
    hdr->itt = ctask->hdr.itt;
    hdr->exp_statsn = r2t->exp_statsn;
    new_offset = r2t->data_offset + r2t->sent;
    hdr->offset = cpu_to_be32(new_offset);
    if (left > conn->max_xmit_dlength) {
        hton24(hdr->dlength, conn->max_xmit_dlength);
        r2t->data_count = conn->max_xmit_dlength;
    } else {
        hton24(hdr->dlength, left);
        r2t->data_count = left;
        hdr->flags = ISCSI_FLAG_CMD_FINAL;
    }
    conn->dataout_pdus_cnt++;

    iscsi_buf_init_virt(&r2t->headbuf, (char*)hdr,
                        sizeof(struct iscsi_hdr));

    if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) {
        BUG_ON(ctask->bad_sg == r2t->sg);
        iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
        r2t->sg += 1;
    } else
        iscsi_buf_init_iov(&ctask->sendbuf,
                           (char*)sc->request_buffer + new_offset,
                           r2t->data_count);

    list_add(&dtask->item, &ctask->dataqueue);

    r2t->dtask = dtask;
}
static void
iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct iscsi_data *hdr;
    struct iscsi_data_task *dtask;

    dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
    BUG_ON(!dtask);
    hdr = &dtask->hdr;
    memset(hdr, 0, sizeof(struct iscsi_data));
    hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
    hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
    ctask->unsol_datasn++;
    hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
    memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
    hdr->itt = ctask->hdr.itt;
    hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
    hdr->offset = cpu_to_be32(ctask->total_length -
                              ctask->r2t_data_count -
                              ctask->unsol_count);
    if (ctask->unsol_count > conn->max_xmit_dlength) {
        hton24(hdr->dlength, conn->max_xmit_dlength);
        ctask->data_count = conn->max_xmit_dlength;
    } else {
        hton24(hdr->dlength, ctask->unsol_count);
        ctask->data_count = ctask->unsol_count;
        hdr->flags = ISCSI_FLAG_CMD_FINAL;
    }

    iscsi_buf_init_virt(&ctask->headbuf, (char*)hdr,
                        sizeof(struct iscsi_hdr));

    list_add(&dtask->item, &ctask->dataqueue);

    ctask->dtask = dtask;
}
/**
 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @sc: scsi command
 **/
static void
iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
               struct scsi_cmnd *sc)
{
    struct iscsi_session *session = conn->session;

    BUG_ON(__kfifo_len(ctask->r2tqueue));

    ctask->sc = sc;
    ctask->conn = conn;
    ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
    ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
    int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun);
    ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
                     (session->age << AGE_SHIFT);
    ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
    ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
    ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
    memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
    memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);

    ctask->mtask = NULL;
    ctask->sent = 0;
    ctask->sg_count = 0;

    ctask->total_length = sc->request_bufflen;

    if (sc->sc_data_direction == DMA_TO_DEVICE) {
        ctask->exp_r2tsn = 0;
        ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
        BUG_ON(ctask->total_length == 0);
        if (sc->use_sg) {
            struct scatterlist *sg = sc->request_buffer;

            iscsi_buf_init_sg(&ctask->sendbuf,
                              &sg[ctask->sg_count++]);
            ctask->sg = sg + 1;
            ctask->bad_sg = sg + sc->use_sg;
        } else
            iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer,
                               sc->request_bufflen);

        /*
         * Write counters:
         *
         *	imm_count	bytes to be sent right after
         *			PDU Header
         *
         *	unsol_count	bytes(as Data-Out) to be sent
         *			without	R2T ack right after
         *			immediate data
         *
         *	r2t_data_count	bytes to be sent via R2T ack's
         *
         *	pad_count	bytes to be sent as zero-padding
         */
        ctask->imm_count = 0;
        ctask->unsol_count = 0;
        ctask->unsol_datasn = 0;
        ctask->xmstate = XMSTATE_W_HDR;
        /* calculate write padding */
        ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
        if (ctask->pad_count) {
            ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count;
            debug_scsi("write padding %d bytes\n",
                       ctask->pad_count);
            ctask->xmstate |= XMSTATE_W_PAD;
        }
        if (session->imm_data_en) {
            if (ctask->total_length >= session->first_burst)
                ctask->imm_count = min(session->first_burst,
                                       conn->max_xmit_dlength);
            else
                ctask->imm_count = min(ctask->total_length,
                                       conn->max_xmit_dlength);
            hton24(ctask->hdr.dlength, ctask->imm_count);
            ctask->xmstate |= XMSTATE_IMM_DATA;
        } else
            zero_data(ctask->hdr.dlength);

        if (!session->initial_r2t_en)
            ctask->unsol_count = min(session->first_burst,
                                     ctask->total_length) - ctask->imm_count;
        if (!ctask->unsol_count)
            /* No unsolicit Data-Out's */
            ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
        else
            ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;

        ctask->r2t_data_count = ctask->total_length -
                                ctask->imm_count -
                                ctask->unsol_count;

        debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
                   "r2t_data %d]\n",
                   ctask->itt, ctask->total_length, ctask->imm_count,
                   ctask->unsol_count, ctask->r2t_data_count);
    } else {
        ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
        if (sc->sc_data_direction == DMA_FROM_DEVICE)
            ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
        ctask->xmstate = XMSTATE_R_HDR;
        zero_data(ctask->hdr.dlength);
    }

    iscsi_buf_init_virt(&ctask->headbuf, (char*)&ctask->hdr,
                        sizeof(struct iscsi_hdr));
    conn->scsicmd_pdus_cnt++;
}
/**
 * iscsi_mtask_xmit - xmit management(immediate) task
 * @conn: iscsi connection
 * @mtask: task management task
 *
 * Notes:
 *	The function can return -EAGAIN in which case caller must
 *	call it again later, or recover. '0' return code means successful
 *	xmit.
 *
 *	Management xmit state machine consists of two states:
 *		IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
 *		IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
 **/
static int
iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
    debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
               conn->id, mtask->xmstate, mtask->itt);

    if (mtask->xmstate & XMSTATE_IMM_HDR) {
        mtask->xmstate &= ~XMSTATE_IMM_HDR;
        if (mtask->data_count)
            mtask->xmstate |= XMSTATE_IMM_DATA;
        if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
            conn->stop_stage != STOP_CONN_RECOVER &&
            conn->hdrdgst_en)
            iscsi_hdr_digest(conn, &mtask->headbuf,
                             (u8*)mtask->hdrext);
        if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) {
            mtask->xmstate |= XMSTATE_IMM_HDR;
            if (mtask->data_count)
                mtask->xmstate &= ~XMSTATE_IMM_DATA;
            return -EAGAIN;
        }
    }

    if (mtask->xmstate & XMSTATE_IMM_DATA) {
        BUG_ON(!mtask->data_count);
        mtask->xmstate &= ~XMSTATE_IMM_DATA;
        /* FIXME: implement.
         * Virtual buffer could be spread across multiple pages...
         */
        do {
            if (iscsi_sendpage(conn, &mtask->sendbuf,
                               &mtask->data_count, &mtask->sent)) {
                mtask->xmstate |= XMSTATE_IMM_DATA;
                return -EAGAIN;
            }
        } while (mtask->data_count);
    }

    BUG_ON(mtask->xmstate != XMSTATE_IDLE);
    return 0;
}
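
/*
 * The handle_xmstate_*() helpers below implement the per-task xmit state
 * machine: each clears its XMSTATE_* bit, tries to send the corresponding
 * header, data or digest, and restores the bit and returns -EAGAIN on a
 * partial send so iscsi_ctask_xmit() can resume later.
 */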
static inline int
handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    ctask->xmstate &= ~XMSTATE_R_HDR;
    if (conn->hdrdgst_en)
        iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
    if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
        BUG_ON(ctask->xmstate != XMSTATE_IDLE);
        return 0; /* wait for Data-In */
    }
    ctask->xmstate |= XMSTATE_R_HDR;
    return -EAGAIN;
}
static inline int
handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    ctask->xmstate &= ~XMSTATE_W_HDR;
    if (conn->hdrdgst_en)
        iscsi_hdr_digest(conn, &ctask->headbuf, (u8*)ctask->hdrext);
    if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) {
        ctask->xmstate |= XMSTATE_W_HDR;
        return -EAGAIN;
    }
    return 0;
}
static inline int
handle_xmstate_data_digest(struct iscsi_conn *conn,
                           struct iscsi_cmd_task *ctask)
{
    ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
    debug_tcp("resent data digest 0x%x\n", ctask->datadigest);
    if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
                                &ctask->datadigest, 0)) {
        ctask->xmstate |= XMSTATE_DATA_DIGEST;
        debug_tcp("resent data digest 0x%x fail!\n",
                  ctask->datadigest);
        return -EAGAIN;
    }
    return 0;
}
static inline int
handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    BUG_ON(!ctask->imm_count);
    ctask->xmstate &= ~XMSTATE_IMM_DATA;

    if (conn->datadgst_en) {
        iscsi_data_digest_init(conn, ctask);
        ctask->immdigest = 0;
    }

    for (;;) {
        if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count,
                           &ctask->sent)) {
            ctask->xmstate |= XMSTATE_IMM_DATA;
            if (conn->datadgst_en) {
                crypto_digest_final(conn->data_tx_tfm,
                                    (u8*)&ctask->immdigest);
                debug_tcp("tx imm sendpage fail 0x%x\n",
                          ctask->immdigest);
            }
            return -EAGAIN;
        }
        if (conn->datadgst_en)
            crypto_digest_update(conn->data_tx_tfm,
                                 &ctask->sendbuf.sg, 1);

        if (!ctask->imm_count)
            break;
        iscsi_buf_init_sg(&ctask->sendbuf,
                          &ctask->sg[ctask->sg_count++]);
    }

    if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
        if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
                                    &ctask->immdigest, 1)) {
            debug_tcp("sending imm digest 0x%x fail!\n",
                      ctask->immdigest);
            return -EAGAIN;
        }
        debug_tcp("sending imm digest 0x%x\n", ctask->immdigest);
    }

    return 0;
}
static inline int
handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct iscsi_data_task *dtask;

    ctask->xmstate |= XMSTATE_UNS_DATA;
    if (ctask->xmstate & XMSTATE_UNS_INIT) {
        iscsi_unsolicit_data_init(conn, ctask);
        BUG_ON(!ctask->dtask);
        dtask = ctask->dtask;
        if (conn->hdrdgst_en)
            iscsi_hdr_digest(conn, &ctask->headbuf,
                             (u8*)dtask->hdrext);
        ctask->xmstate &= ~XMSTATE_UNS_INIT;
    }
    if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) {
        ctask->xmstate &= ~XMSTATE_UNS_DATA;
        ctask->xmstate |= XMSTATE_UNS_HDR;
        return -EAGAIN;
    }

    debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
               ctask->itt, ctask->unsol_count, ctask->sent);
    return 0;
}
static inline int
handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct iscsi_data_task *dtask = ctask->dtask;

    BUG_ON(!ctask->data_count);
    ctask->xmstate &= ~XMSTATE_UNS_DATA;

    if (conn->datadgst_en) {
        iscsi_data_digest_init(conn, ctask);
        dtask->digest = 0;
    }

    for (;;) {
        int start = ctask->sent;

        if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count,
                           &ctask->sent)) {
            ctask->unsol_count -= ctask->sent - start;
            ctask->xmstate |= XMSTATE_UNS_DATA;
            /* will continue with this ctask later.. */
            if (conn->datadgst_en) {
                crypto_digest_final(conn->data_tx_tfm,
                                    (u8 *)&dtask->digest);
                debug_tcp("tx uns data fail 0x%x\n",
                          dtask->digest);
            }
            return -EAGAIN;
        }

        BUG_ON(ctask->sent > ctask->total_length);
        ctask->unsol_count -= ctask->sent - start;

        /*
         * XXX:we may run here with un-initial sendbuf.
         * so pass it
         */
        if (conn->datadgst_en && ctask->sent - start > 0)
            crypto_digest_update(conn->data_tx_tfm,
                                 &ctask->sendbuf.sg, 1);

        if (!ctask->data_count)
            break;
        iscsi_buf_init_sg(&ctask->sendbuf,
                          &ctask->sg[ctask->sg_count++]);
    }
    BUG_ON(ctask->unsol_count < 0);

    /*
     * Done with the Data-Out. Next, check if we need
     * to send another unsolicited Data-Out.
     */
    if (ctask->unsol_count) {
        if (conn->datadgst_en) {
            if (iscsi_digest_final_send(conn, ctask,
                                        &dtask->digestbuf,
                                        &dtask->digest, 1)) {
                debug_tcp("send uns digest 0x%x fail\n",
                          dtask->digest);
                return -EAGAIN;
            }
            debug_tcp("sending uns digest 0x%x, more uns\n",
                      dtask->digest);
        }
        ctask->xmstate |= XMSTATE_UNS_INIT;
        return 1;
    }

    if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
        if (iscsi_digest_final_send(conn, ctask,
                                    &dtask->digestbuf,
                                    &dtask->digest, 1)) {
            debug_tcp("send last uns digest 0x%x fail\n",
                      dtask->digest);
            return -EAGAIN;
        }
        debug_tcp("sending uns digest 0x%x\n",dtask->digest);
    }

    return 0;
}
static inline int
handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct iscsi_session *session = conn->session;
    struct iscsi_r2t_info *r2t = ctask->r2t;
    struct iscsi_data_task *dtask = r2t->dtask;
    int left;

    ctask->xmstate &= ~XMSTATE_SOL_DATA;
    ctask->dtask = dtask;

    if (conn->datadgst_en) {
        iscsi_data_digest_init(conn, ctask);
        dtask->digest = 0;
    }
solicit_again:
    /*
     * send Data-Out within this R2T sequence.
     */
    if (!r2t->data_count)
        goto data_out_done;

    if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) {
        ctask->xmstate |= XMSTATE_SOL_DATA;
        /* will continue with this ctask later.. */
        if (conn->datadgst_en) {
            crypto_digest_final(conn->data_tx_tfm,
                                (u8 *)&dtask->digest);
            debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
        }
        return -EAGAIN;
    }

    BUG_ON(r2t->data_count < 0);
    if (conn->datadgst_en)
        crypto_digest_update(conn->data_tx_tfm, &r2t->sendbuf.sg, 1);

    if (r2t->data_count) {
        BUG_ON(ctask->sc->use_sg == 0);
        if (!iscsi_buf_left(&r2t->sendbuf)) {
            BUG_ON(ctask->bad_sg == r2t->sg);
            iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
            r2t->sg += 1;
        }
        goto solicit_again;
    }

data_out_done:
    /*
     * Done with this Data-Out. Next, check if we have
     * to send another Data-Out for this R2T.
     */
    BUG_ON(r2t->data_length - r2t->sent < 0);
    left = r2t->data_length - r2t->sent;
    if (left) {
        if (conn->datadgst_en) {
            if (iscsi_digest_final_send(conn, ctask,
                                        &dtask->digestbuf,
                                        &dtask->digest, 1)) {
                debug_tcp("send r2t data digest 0x%x"
                          "fail\n", dtask->digest);
                return -EAGAIN;
            }
            debug_tcp("r2t data send digest 0x%x\n",
                      dtask->digest);
        }
        iscsi_solicit_data_cont(conn, ctask, r2t, left);
        ctask->xmstate |= XMSTATE_SOL_DATA;
        ctask->xmstate &= ~XMSTATE_SOL_HDR;
        return 1;
    }

    /*
     * Done with this R2T. Check if there are more
     * outstanding R2Ts ready to be processed.
     */
    BUG_ON(ctask->r2t_data_count - r2t->data_length < 0);
    if (conn->datadgst_en) {
        if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
                                    &dtask->digest, 1)) {
            debug_tcp("send last r2t data digest 0x%x"
                      "fail\n", dtask->digest);
            return -EAGAIN;
        }
        debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
    }

    ctask->r2t_data_count -= r2t->data_length;
    ctask->r2t = NULL;
    spin_lock_bh(&session->lock);
    __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
    spin_unlock_bh(&session->lock);
    if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
        ctask->r2t = r2t;
        ctask->xmstate |= XMSTATE_SOL_DATA;
        ctask->xmstate &= ~XMSTATE_SOL_HDR;
        return 1;
    }

    return 0;
}
static inline int
handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    struct iscsi_data_task *dtask = ctask->dtask;
    int sent;

    ctask->xmstate &= ~XMSTATE_W_PAD;
    iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad,
                        ctask->pad_count);
    if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) {
        ctask->xmstate |= XMSTATE_W_PAD;
        return -EAGAIN;
    }

    if (conn->datadgst_en) {
        crypto_digest_update(conn->data_tx_tfm, &ctask->sendbuf.sg, 1);
        /* imm data? */
        if (!dtask) {
            if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
                                        &ctask->immdigest, 1)) {
                debug_tcp("send padding digest 0x%x"
                          "fail!\n", ctask->immdigest);
                return -EAGAIN;
            }
            debug_tcp("done with padding, digest 0x%x\n",
                      ctask->immdigest);
        } else {
            if (iscsi_digest_final_send(conn, ctask,
                                        &dtask->digestbuf,
                                        &dtask->digest, 1)) {
                debug_tcp("send padding digest 0x%x"
                          "fail\n", dtask->digest);
                return -EAGAIN;
            }
            debug_tcp("done with padding, digest 0x%x\n",
                      dtask->digest);
        }
    }

    return 0;
}
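
/*
 * Transmit a SCSI command task by walking its xmstate bits in order:
 * read/write header, immediate data, unsolicited and solicited Data-Outs,
 * and finally write padding.  Returns -EAGAIN when the socket backs up.
 */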
static int
iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
    int rc = 0;

    debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
               conn->id, ctask->xmstate, ctask->itt);

    /*
     * serialize with TMF AbortTask
     */
    if (ctask->mtask)
        return rc;

    if (ctask->xmstate & XMSTATE_R_HDR) {
        rc = handle_xmstate_r_hdr(conn, ctask);
        return rc;
    }

    if (ctask->xmstate & XMSTATE_W_HDR) {
        rc = handle_xmstate_w_hdr(conn, ctask);
        if (rc)
            return rc;
    }

    /* XXX: for data digest xmit recover */
    if (ctask->xmstate & XMSTATE_DATA_DIGEST) {
        rc = handle_xmstate_data_digest(conn, ctask);
        if (rc)
            return rc;
    }

    if (ctask->xmstate & XMSTATE_IMM_DATA) {
        rc = handle_xmstate_imm_data(conn, ctask);
        if (rc)
            return rc;
    }

    if (ctask->xmstate & XMSTATE_UNS_HDR) {
        BUG_ON(!ctask->unsol_count);
        ctask->xmstate &= ~XMSTATE_UNS_HDR;
unsolicit_head_again:
        rc = handle_xmstate_uns_hdr(conn, ctask);
        if (rc)
            return rc;
    }

    if (ctask->xmstate & XMSTATE_UNS_DATA) {
        rc = handle_xmstate_uns_data(conn, ctask);
        if (rc == 1)
            goto unsolicit_head_again;
        else if (rc)
            return rc;
    }

    if (ctask->xmstate & XMSTATE_SOL_HDR) {
        struct iscsi_r2t_info *r2t;

        ctask->xmstate &= ~XMSTATE_SOL_HDR;
        ctask->xmstate |= XMSTATE_SOL_DATA;
        if (!ctask->r2t)
            __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t,
                        sizeof(void*));
solicit_head_again:
        r2t = ctask->r2t;
        if (conn->hdrdgst_en)
            iscsi_hdr_digest(conn, &r2t->headbuf,
                             (u8*)r2t->dtask->hdrext);
        if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) {
            ctask->xmstate &= ~XMSTATE_SOL_DATA;
            ctask->xmstate |= XMSTATE_SOL_HDR;
            return -EAGAIN;
        }

        debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
                   r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
                   r2t->sent);
    }

    if (ctask->xmstate & XMSTATE_SOL_DATA) {
        rc = handle_xmstate_sol_data(conn, ctask);
        if (rc == 1)
            goto solicit_head_again;
        if (rc)
            return rc;
    }

    /*
     * Last thing to check is whether we need to send write
     * padding. Note that we check for xmstate equality, not just the bit.
     */
    if (ctask->xmstate == XMSTATE_W_PAD)
        rc = handle_xmstate_w_pad(conn, ctask);

    return rc;
}
/**
 * iscsi_data_xmit - xmit any command into the scheduled connection
 * @conn: iscsi connection
 *
 * Notes:
 *	The function can return -EAGAIN in which case the caller must
 *	re-schedule it again later or recover. '0' return code means
 *	successful xmit.
 **/
static int
iscsi_data_xmit(struct iscsi_conn *conn)
{
    if (unlikely(conn->suspend_tx)) {
        debug_tcp("conn %d Tx suspended!\n", conn->id);
        return 0;
    }

    /*
     * Transmit in the following order:
     *
     * 1) un-finished xmit (ctask or mtask)
     * 2) immediate control PDUs
     * 3) write data
     * 4) SCSI commands
     * 5) non-immediate control PDUs
     *
     * No need to lock around __kfifo_get as long as
     * there's one producer and one consumer.
     */

    BUG_ON(conn->ctask && conn->mtask);

    if (conn->ctask) {
        if (iscsi_ctask_xmit(conn, conn->ctask))
            goto again;
        /* done with this in-progress ctask */
        conn->ctask = NULL;
    }
    if (conn->mtask) {
        if (iscsi_mtask_xmit(conn, conn->mtask))
            goto again;
        /* done with this in-progress mtask */
        conn->mtask = NULL;
    }

    /* process immediate first */
    if (unlikely(__kfifo_len(conn->immqueue))) {
        struct iscsi_session *session = conn->session;
        while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
                           sizeof(void*))) {
            if (iscsi_mtask_xmit(conn, conn->mtask))
                goto again;

            if (conn->mtask->hdr.itt ==
                cpu_to_be32(ISCSI_RESERVED_TAG)) {
                spin_lock_bh(&session->lock);
                __kfifo_put(session->mgmtpool.queue,
                            (void*)&conn->mtask, sizeof(void*));
                spin_unlock_bh(&session->lock);
            }
        }
        /* done with this mtask */
        conn->mtask = NULL;
    }

    /* process write queue */
    while (__kfifo_get(conn->writequeue, (void*)&conn->ctask,
                       sizeof(void*))) {
        if (iscsi_ctask_xmit(conn, conn->ctask))
            goto again;
    }

    /* process command queue */
    while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
                       sizeof(void*))) {
        if (iscsi_ctask_xmit(conn, conn->ctask))
            goto again;
    }
    /* done with this ctask */
    conn->ctask = NULL;

    /* process the rest control plane PDUs, if any */
    if (unlikely(__kfifo_len(conn->mgmtqueue))) {
        struct iscsi_session *session = conn->session;

        while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
                           sizeof(void*))) {
            if (iscsi_mtask_xmit(conn, conn->mtask))
                goto again;

            if (conn->mtask->hdr.itt ==
                cpu_to_be32(ISCSI_RESERVED_TAG)) {
                spin_lock_bh(&session->lock);
                __kfifo_put(session->mgmtpool.queue,
                            (void*)&conn->mtask,
                            sizeof(void*));
                spin_unlock_bh(&session->lock);
            }
        }
        /* done with this mtask */
        conn->mtask = NULL;
    }

    return 0;

again:
    if (unlikely(conn->suspend_tx))
        return 0;

    return -EAGAIN;
}

static void
iscsi_xmitworker(void *data)
{
    struct iscsi_conn *conn = data;

    /*
     * serialize Xmit worker on a per-connection basis.
     */
    mutex_lock(&conn->xmitmutex);
    if (iscsi_data_xmit(conn))
        scsi_queue_work(conn->session->host, &conn->xmitwork);
    mutex_unlock(&conn->xmitmutex);
}
#define FAILURE_BAD_HOST		1
#define FAILURE_SESSION_FAILED		2
#define FAILURE_SESSION_FREED		3
#define FAILURE_WINDOW_CLOSED		4
#define FAILURE_SESSION_TERMINATE	5
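
/*
 * queuecommand() entry point: reject commands while the session is not
 * logged in or the CmdSN window is closed, otherwise allocate a ctask,
 * build the SCSI Command PDU and schedule the xmit worker.
 */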
static int
iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
    struct Scsi_Host *host;
    int reason = 0;
    struct iscsi_session *session;
    struct iscsi_conn *conn = NULL;
    struct iscsi_cmd_task *ctask = NULL;

    sc->scsi_done = done;
    sc->result = 0;

    host = sc->device->host;
    session = iscsi_hostdata(host->hostdata);
    BUG_ON(host != session->host);

    spin_lock(&session->lock);

    if (session->state != ISCSI_STATE_LOGGED_IN) {
        if (session->state == ISCSI_STATE_FAILED) {
            reason = FAILURE_SESSION_FAILED;
            goto reject;
        } else if (session->state == ISCSI_STATE_TERMINATE) {
            reason = FAILURE_SESSION_TERMINATE;
            goto fault;
        }
        reason = FAILURE_SESSION_FREED;
        goto fault;
    }

    /*
     * Check for iSCSI window and take care of CmdSN wrap-around
     */
    if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
        reason = FAILURE_WINDOW_CLOSED;
        goto reject;
    }

    conn = session->leadconn;

    __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
    BUG_ON(!ctask);

    sc->SCp.phase = session->age;
    sc->SCp.ptr = (char*)ctask;
    iscsi_cmd_init(conn, ctask, sc);

    __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
    debug_scsi(
       "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
        sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
        conn->id, (long)sc, ctask->itt, sc->request_bufflen,
        session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
    spin_unlock(&session->lock);

    scsi_queue_work(host, &conn->xmitwork);
    return 0;

reject:
    spin_unlock(&session->lock);
    debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
    return SCSI_MLQUEUE_HOST_BUSY;

fault:
    spin_unlock(&session->lock);
    printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n",
           sc->cmnd[0], reason);
    sc->sense_buffer[0] = 0x70;
    sc->sense_buffer[2] = NOT_READY;
    sc->sense_buffer[7] = 0x6;
    sc->sense_buffer[12] = 0x08;
    sc->sense_buffer[13] = 0x00;
    sc->result = (DID_NO_CONNECT << 16);
    sc->resid = sc->request_bufflen;
    sc->scsi_done(sc);
    return 0;
}
static int
iscsi_change_queue_depth(struct scsi_device *sdev, int depth)
{
	if (depth > ISCSI_MAX_CMD_PER_LUN)
		depth = ISCSI_MAX_CMD_PER_LUN;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
	return sdev->queue_depth;
}
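/*
 * iscsi_pool_init()/iscsi_pool_free() implement a simple preallocated
 * object pool on top of a kfifo of pointers. A minimal usage sketch
 * (hypothetical caller, not taken from this driver; "struct my_item"
 * only illustrates item_size):
 *
 *	void **items;
 *	struct iscsi_queue q;
 *
 *	if (iscsi_pool_init(&q, 16, &items, sizeof(struct my_item)))
 *		return -ENOMEM;
 *	... objects then circulate via __kfifo_get()/__kfifo_put(q.queue) ...
 *	iscsi_pool_free(&q, items);
 */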
static int
iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
{
	int i;

	*items = kmalloc(max * sizeof(void*), GFP_KERNEL);
	if (*items == NULL)
		return -ENOMEM;

	q->max = max;
	q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
	if (q->pool == NULL) {
		kfree(*items);
		return -ENOMEM;
	}

	q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
			      GFP_KERNEL, NULL);
	if (q->queue == ERR_PTR(-ENOMEM)) {
		kfree(q->pool);
		kfree(*items);
		return -ENOMEM;
	}

	for (i = 0; i < max; i++) {
		q->pool[i] = kmalloc(item_size, GFP_KERNEL);
		if (q->pool[i] == NULL) {
			int j;

			for (j = 0; j < i; j++)
				kfree(q->pool[j]);

			kfifo_free(q->queue);
			kfree(q->pool);
			kfree(*items);
			return -ENOMEM;
		}
		memset(q->pool[i], 0, item_size);
		(*items)[i] = q->pool[i];
		__kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
	}
	return 0;
}
static void
iscsi_pool_free(struct iscsi_queue *q, void **items)
{
	int i;

	for (i = 0; i < q->max; i++)
		kfree(q->pool[i]);
	kfree(q->pool);
	kfree(items);
}
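/*
 * Allocate and initialize a new iSCSI connection: default operational
 * parameters, the xmit/write/immediate/management kfifos, the
 * preallocated login mtask and the initial PDU receive buffer. Any
 * allocation failure unwinds everything allocated so far via the
 * *_alloc_fail labels below.
 */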
static struct iscsi_cls_conn *
iscsi_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;

	cls_conn = iscsi_create_conn(cls_session, conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	memset(conn, 0, sizeof(*conn));

	conn->cls_conn = cls_conn;
	conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
	conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	conn->id = conn_idx;
	conn->exp_statsn = 0;
	conn->tmabort_state = TMABORT_INITIAL;

	/* initial operational parameters */
	conn->hdr_size = sizeof(struct iscsi_hdr);
	conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
	conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;

	/* initialize general xmit PDU commands queue */
	conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
				      GFP_KERNEL, NULL);
	if (conn->xmitqueue == ERR_PTR(-ENOMEM))
		goto xmitqueue_alloc_fail;

	/* initialize write response PDU commands queue */
	conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*),
				       GFP_KERNEL, NULL);
	if (conn->writequeue == ERR_PTR(-ENOMEM))
		goto writequeue_alloc_fail;

	/* initialize general immediate & non-immediate PDU commands queue */
	conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
				     GFP_KERNEL, NULL);
	if (conn->immqueue == ERR_PTR(-ENOMEM))
		goto immqueue_alloc_fail;

	conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
				      GFP_KERNEL, NULL);
	if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
		goto mgmtqueue_alloc_fail;

	INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);

	/* allocate login_mtask used for the login/text sequences */
	spin_lock_bh(&session->lock);
	if (!__kfifo_get(session->mgmtpool.queue,
			 (void*)&conn->login_mtask,
			 sizeof(void*))) {
		spin_unlock_bh(&session->lock);
		goto login_mtask_alloc_fail;
	}
	spin_unlock_bh(&session->lock);

	/* allocate initial PDU receive place holder */
	if (conn->data_size <= PAGE_SIZE)
		conn->data = kmalloc(conn->data_size, GFP_KERNEL);
	else
		conn->data = (void*)__get_free_pages(GFP_KERNEL,
					get_order(conn->data_size));
	if (!conn->data)
		goto max_recv_dlenght_alloc_fail;

	init_timer(&conn->tmabort_timer);
	mutex_init(&conn->xmitmutex);
	init_waitqueue_head(&conn->ehwait);

	return cls_conn;

max_recv_dlenght_alloc_fail:
	spin_lock_bh(&session->lock);
	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
		    sizeof(void*));
	spin_unlock_bh(&session->lock);
login_mtask_alloc_fail:
	kfifo_free(conn->mgmtqueue);
mgmtqueue_alloc_fail:
	kfifo_free(conn->immqueue);
immqueue_alloc_fail:
	kfifo_free(conn->writequeue);
writequeue_alloc_fail:
	kfifo_free(conn->xmitqueue);
xmitqueue_alloc_fail:
	iscsi_destroy_conn(cls_conn);
	return NULL;
}
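/*
 * Tear down a connection: suspend both directions, clean up a socket
 * that was never started, wait for the host to drain outstanding
 * commands, free the digest transforms, the receive buffer, the kfifos
 * and finally the transport-class connection object.
 */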
static void
iscsi_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	unsigned long flags;

	mutex_lock(&conn->xmitmutex);
	set_bit(SUSPEND_BIT, &conn->suspend_tx);
	if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
		struct sock *sk = conn->sock->sk;

		/*
		 * conn_start() has never been called!
		 * need to cleanup the socket.
		 */
		write_lock_bh(&sk->sk_callback_lock);
		set_bit(SUSPEND_BIT, &conn->suspend_rx);
		write_unlock_bh(&sk->sk_callback_lock);

		sock_hold(conn->sock->sk);
		iscsi_conn_restore_callbacks(conn);
		sock_put(conn->sock->sk);
		sock_release(conn->sock);
	}

	spin_lock_bh(&session->lock);
	conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
	if (session->leadconn == conn) {
		/*
		 * leading connection? then give up on recovery.
		 */
		session->state = ISCSI_STATE_TERMINATE;
		wake_up(&conn->ehwait);
	}
	spin_unlock_bh(&session->lock);

	mutex_unlock(&conn->xmitmutex);

	/*
	 * Block until all in-progress commands for this connection
	 * time out or fail.
	 */
	for (;;) {
		spin_lock_irqsave(session->host->host_lock, flags);
		if (!session->host->host_busy) { /* OK for ERL == 0 */
			spin_unlock_irqrestore(session->host->host_lock, flags);
			break;
		}
		spin_unlock_irqrestore(session->host->host_lock, flags);
		msleep_interruptible(500);
		printk("conn_destroy(): host_busy %d host_failed %d\n",
		       session->host->host_busy, session->host->host_failed);
		/*
		 * force eh_abort() to unblock
		 */
		wake_up(&conn->ehwait);
	}

	/* now free crypto */
	if (conn->hdrdgst_en || conn->datadgst_en) {
		if (conn->tx_tfm)
			crypto_free_tfm(conn->tx_tfm);
		if (conn->rx_tfm)
			crypto_free_tfm(conn->rx_tfm);
		if (conn->data_tx_tfm)
			crypto_free_tfm(conn->data_tx_tfm);
		if (conn->data_rx_tfm)
			crypto_free_tfm(conn->data_rx_tfm);
	}

	/* free conn->data, size = MaxRecvDataSegmentLength */
	if (conn->data_size <= PAGE_SIZE)
		kfree(conn->data);
	else
		free_pages((unsigned long)conn->data,
			   get_order(conn->data_size));

	spin_lock_bh(&session->lock);
	__kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
		    sizeof(void*));
	list_del(&conn->item);
	if (list_empty(&session->connections))
		session->leadconn = NULL;
	if (session->leadconn && session->leadconn == conn)
		session->leadconn = container_of(session->connections.next,
						 struct iscsi_conn, item);

	if (session->leadconn == NULL)
		/* no connections left; reset sequencing */
		session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
	spin_unlock_bh(&session->lock);

	kfifo_free(conn->xmitqueue);
	kfifo_free(conn->writequeue);
	kfifo_free(conn->immqueue);
	kfifo_free(conn->mgmtqueue);

	iscsi_destroy_conn(cls_conn);
}
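/*
 * Bind a connection to its session and to the user-space provided TCP
 * socket (looked up by file descriptor), install the socket callbacks
 * used by the receive path, and unblock the xmit worker so the Login
 * Phase can proceed.
 */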
static int
iscsi_conn_bind(struct iscsi_cls_session *cls_session,
		struct iscsi_cls_conn *cls_conn, uint32_t transport_fd,
		int is_leading)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
	struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = cls_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup(transport_fd, &err);
	if (!sock) {
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	/* lookup for existing connection */
	spin_lock_bh(&session->lock);
	list_for_each_entry(tmp, &session->connections, item) {
		if (tmp == conn) {
			if (conn->c_stage != ISCSI_CONN_STOPPED ||
			    conn->stop_stage == STOP_CONN_TERM) {
				printk(KERN_ERR "iscsi_tcp: can't bind "
				       "non-stopped connection (%d:%d)\n",
				       conn->c_stage, conn->stop_stage);
				spin_unlock_bh(&session->lock);
				return -EINVAL;
			}
			break;
		}
	}
	if (tmp != conn) {
		/* bind new iSCSI connection to session */
		conn->session = session;

		list_add(&conn->item, &session->connections);
	}
	spin_unlock_bh(&session->lock);

	if (conn->stop_stage != STOP_CONN_SUSPEND) {
		/* bind iSCSI connection and socket */
		conn->sock = sock;

		/* setup Socket parameters */
		sk = sock->sk;
		sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
		sk->sk_allocation = GFP_ATOMIC;

		/* FIXME: disable Nagle's algorithm */

		/*
		 * Intercept TCP callbacks for sendfile like receive
		 * processing.
		 */
		iscsi_conn_set_callbacks(conn);

		conn->sendpage = conn->sock->ops->sendpage;

		/*
		 * set receive state machine into initial state
		 */
		conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	if (is_leading)
		session->leadconn = conn;

	/*
	 * Unblock xmitworker(), Login Phase will pass through.
	 */
	clear_bit(SUSPEND_BIT, &conn->suspend_rx);
	clear_bit(SUSPEND_BIT, &conn->suspend_tx);

	return 0;
}
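/*
 * Transition the connection to Full Feature Phase: mark the session
 * LOGGED_IN and, depending on how the connection was previously
 * stopped, resume from recovery, termination or a plain suspend.
 */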
static int
iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct sock *sk;

	/* FF phase warming up... */

	if (session == NULL) {
		printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n");
		return -EPERM;
	}

	sk = conn->sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	spin_lock_bh(&session->lock);
	conn->c_stage = ISCSI_CONN_STARTED;
	session->state = ISCSI_STATE_LOGGED_IN;

	switch(conn->stop_stage) {
	case STOP_CONN_RECOVER:
		/*
		 * unblock eh_abort() if it is blocked. re-try all
		 * commands after successful recovery
		 */
		session->conn_cnt++;
		conn->stop_stage = 0;
		conn->tmabort_state = TMABORT_INITIAL;
		wake_up(&conn->ehwait);
		break;
	case STOP_CONN_TERM:
		session->conn_cnt++;
		conn->stop_stage = 0;
		break;
	case STOP_CONN_SUSPEND:
		conn->stop_stage = 0;
		clear_bit(SUSPEND_BIT, &conn->suspend_rx);
		clear_bit(SUSPEND_BIT, &conn->suspend_tx);
		break;
	default:
		break;
	}
	spin_unlock_bh(&session->lock);
	write_unlock_bh(&sk->sk_callback_lock);

	return 0;
}
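/*
 * Stop a connection: suspend Rx/Tx, mark the connection stopped and,
 * for terminate/recovery stops, restore the socket callbacks, flush
 * the per-connection xmit queues (returning tasks to their pools) and
 * release the socket. For recovery the header/data digest settings are
 * reset until they are re-negotiated.
 */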
static void
iscsi_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct sock *sk;
	unsigned long flags;

	BUG_ON(!conn->sock);
	sk = conn->sock->sk;
	write_lock_bh(&sk->sk_callback_lock);
	set_bit(SUSPEND_BIT, &conn->suspend_rx);
	write_unlock_bh(&sk->sk_callback_lock);

	mutex_lock(&conn->xmitmutex);

	spin_lock_irqsave(session->host->host_lock, flags);
	spin_lock(&session->lock);
	conn->stop_stage = flag;
	conn->c_stage = ISCSI_CONN_STOPPED;
	set_bit(SUSPEND_BIT, &conn->suspend_tx);

	if (flag != STOP_CONN_SUSPEND)
		session->conn_cnt--;

	if (session->conn_cnt == 0 || session->leadconn == conn)
		session->state = ISCSI_STATE_FAILED;

	spin_unlock(&session->lock);
	spin_unlock_irqrestore(session->host->host_lock, flags);

	if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) {
		struct iscsi_cmd_task *ctask;
		struct iscsi_mgmt_task *mtask;

		/*
		 * Socket must go now.
		 */
		sock_hold(conn->sock->sk);
		iscsi_conn_restore_callbacks(conn);
		sock_put(conn->sock->sk);

		/*
		 * flush xmit queues.
		 */
		spin_lock_bh(&session->lock);
		while (__kfifo_get(conn->writequeue, (void*)&ctask,
				   sizeof(void*)) ||
		       __kfifo_get(conn->xmitqueue, (void*)&ctask,
				   sizeof(void*))) {
			struct iscsi_r2t_info *r2t;

			/*
			 * flush ctask's r2t queues
			 */
			while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
					   sizeof(void*)))
				__kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
					    sizeof(void*));

			spin_unlock_bh(&session->lock);
			iscsi_ctask_cleanup(conn, ctask);
			spin_lock_bh(&session->lock);
		}

		while (__kfifo_get(conn->immqueue, (void*)&mtask,
				   sizeof(void*)) ||
		       __kfifo_get(conn->mgmtqueue, (void*)&mtask,
				   sizeof(void*)))
			__kfifo_put(session->mgmtpool.queue,
				    (void*)&mtask, sizeof(void*));
		spin_unlock_bh(&session->lock);

		/*
		 * release socket only after we stopped data_xmit()
		 * activity and flushed all outstandings
		 */
		sock_release(conn->sock);
		conn->sock = NULL;

		/*
		 * for connection level recovery we should not calculate
		 * header digest. conn->hdr_size used for optimization
		 * in hdr_extract() and will be re-negotiated at
		 * set_param() time.
		 */
		if (flag == STOP_CONN_RECOVER) {
			conn->hdr_size = sizeof(struct iscsi_hdr);
			conn->hdrdgst_en = 0;
			conn->datadgst_en = 0;
		}
	}
	mutex_unlock(&conn->xmitmutex);
}
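/*
 * Queue an arbitrary control PDU (Login, Text, Nop-Out, TMF, ...) for
 * transmission. The caller passes a pre-built iscsi_hdr plus optional
 * immediate data; CmdSN/ExpStatSN are stamped here and the mtask is put
 * on either the immediate or the management queue. A minimal caller
 * sketch (hypothetical, for illustration only):
 *
 *	struct iscsi_nopout hdr;
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE;
 *	hdr.itt = cpu_to_be32(ISCSI_RESERVED_TAG);
 *	iscsi_conn_send_generic(conn, (struct iscsi_hdr *)&hdr, NULL, 0);
 */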
static int
iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
			char *data, uint32_t data_size)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
	struct iscsi_mgmt_task *mtask;

	spin_lock_bh(&session->lock);
	if (session->state == ISCSI_STATE_TERMINATE) {
		spin_unlock_bh(&session->lock);
		return -EPERM;
	}
	if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
	    hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
		/*
		 * Login and Text are sent serially, in
		 * request-followed-by-response sequence.
		 * Same mtask can be used. Same ITT must be used.
		 * Note that login_mtask is preallocated at conn_create().
		 */
		mtask = conn->login_mtask;
	else {
		BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
		BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);

		if (!__kfifo_get(session->mgmtpool.queue,
				 (void*)&mtask, sizeof(void*))) {
			spin_unlock_bh(&session->lock);
			return -ENOSPC;
		}
	}

	/*
	 * pre-format CmdSN and ExpStatSN for outgoing PDU.
	 */
	if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
		hdr->itt = mtask->itt | (conn->id << CID_SHIFT) |
			   (session->age << AGE_SHIFT);
		nop->cmdsn = cpu_to_be32(session->cmdsn);
		if (conn->c_stage == ISCSI_CONN_STARTED &&
		    !(hdr->opcode & ISCSI_OP_IMMEDIATE))
			session->cmdsn++;
	} else
		/* do not advance CmdSN */
		nop->cmdsn = cpu_to_be32(session->cmdsn);

	nop->exp_statsn = cpu_to_be32(conn->exp_statsn);

	memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));

	iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr,
			    sizeof(struct iscsi_hdr));

	spin_unlock_bh(&session->lock);

	if (data_size) {
		memcpy(mtask->data, data, data_size);
		mtask->data_count = data_size;
	} else
		mtask->data_count = 0;

	mtask->xmstate = XMSTATE_IMM_HDR;

	if (mtask->data_count) {
		iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data,
				   mtask->data_count);
	}

	debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
		   hdr->opcode, hdr->itt, data_size);

	/*
	 * since send_pdu() could be called at least from two contexts,
	 * we need to serialize __kfifo_put, so we don't have to take
	 * additional lock on fast data-path
	 */
	if (hdr->opcode & ISCSI_OP_IMMEDIATE)
		__kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
	else
		__kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));

	scsi_queue_work(session->host, &conn->xmitwork);
	return 0;
}
static int
iscsi_eh_host_reset(struct scsi_cmnd *sc)
{
	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_session *session = conn->session;

	spin_lock_bh(&session->lock);
	if (session->state == ISCSI_STATE_TERMINATE) {
		debug_scsi("failing host reset: session terminated "
			   "[CID %d age %d]", conn->id, session->age);
		spin_unlock_bh(&session->lock);
		return FAILED;
	}
	spin_unlock_bh(&session->lock);

	debug_scsi("failing connection CID %d due to SCSI host reset "
		   "[itt 0x%x age %d]", conn->id, ctask->itt,
		   session->age);
	iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);

	return SUCCESS;
}
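/*
 * Timer callback armed by the eh_abort path: if no TMF response arrived
 * in time, return the mgmt task to its pool, mark the abort as timed
 * out and wake up the waiting eh thread.
 */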
static void
iscsi_tmabort_timedout(unsigned long data)
{
	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_session *session = conn->session;

	spin_lock(&session->lock);
	if (conn->tmabort_state == TMABORT_INITIAL) {
		__kfifo_put(session->mgmtpool.queue,
			    (void*)&ctask->mtask, sizeof(void*));
		conn->tmabort_state = TMABORT_TIMEDOUT;
		debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
			   (long)ctask->sc, ctask->itt);
		/* unblock eh_abort() */
		wake_up(&conn->ehwait);
	}
	spin_unlock(&session->lock);
}
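/*
 * SCSI error-handler abort. For ERL=0 a Task Management (ABORT TASK)
 * PDU is sent for the timed-out command, a 3 second timer guards the
 * response, and the eh thread blocks on ehwait until the abort is
 * answered, times out, or the session is re-opened or terminated.
 */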
static int
iscsi_eh_abort(struct scsi_cmnd *sc)
{
	int rc;
	struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_session *session = conn->session;

	conn->eh_abort_cnt++;
	debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);

	/*
	 * two cases for ERL=0 here:
	 *
	 * 1) connection-level failure;
	 * 2) recovery due protocol error;
	 */
	mutex_lock(&conn->xmitmutex);
	spin_lock_bh(&session->lock);
	if (session->state != ISCSI_STATE_LOGGED_IN) {
		if (session->state == ISCSI_STATE_TERMINATE) {
			spin_unlock_bh(&session->lock);
			mutex_unlock(&conn->xmitmutex);
			goto failed;
		}
		spin_unlock_bh(&session->lock);
	} else {
		struct iscsi_tm *hdr = &conn->tmhdr;

		/*
		 * Still LOGGED_IN...
		 */

		if (!ctask->sc || sc->SCp.phase != session->age) {
			/*
			 * 1) ctask completed before time out. But session
			 *    is still ok => Happy Retry.
			 * 2) session was re-open during time out of ctask.
			 */
			spin_unlock_bh(&session->lock);
			mutex_unlock(&conn->xmitmutex);
			return SUCCESS;
		}

		conn->tmabort_state = TMABORT_INITIAL;
		spin_unlock_bh(&session->lock);

		/*
		 * ctask timed out but session is OK
		 * ERL=0 requires task mgmt abort to be issued on each
		 * failed command. requests must be serialized.
		 */
		memset(hdr, 0, sizeof(struct iscsi_tm));
		hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
		hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
		memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
		hdr->rtt = ctask->hdr.itt;
		hdr->refcmdsn = ctask->hdr.cmdsn;

		rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
					     NULL, 0);
		if (rc) {
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			debug_scsi("abort sent failure [itt 0x%x]", ctask->itt);
		} else {
			struct iscsi_r2t_info *r2t;

			/*
			 * TMF abort vs. TMF response race logic
			 */
			spin_lock_bh(&session->lock);
			ctask->mtask = (struct iscsi_mgmt_task *)
				session->mgmt_cmds[(hdr->itt & ITT_MASK) -
							ISCSI_MGMT_ITT_OFFSET];
			/*
			 * have to flush r2tqueue to avoid r2t leaks
			 */
			while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
					   sizeof(void*)))
				__kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
					    sizeof(void*));

			if (conn->tmabort_state == TMABORT_INITIAL) {
				conn->tmfcmd_pdus_cnt++;
				conn->tmabort_timer.expires = 3*HZ + jiffies;
				conn->tmabort_timer.function =
						iscsi_tmabort_timedout;
				conn->tmabort_timer.data = (unsigned long)ctask;
				add_timer(&conn->tmabort_timer);
				debug_scsi("abort sent [itt 0x%x]", ctask->itt);
			} else {
				if (!ctask->sc ||
				    conn->tmabort_state == TMABORT_SUCCESS) {
					conn->tmabort_state = TMABORT_INITIAL;
					spin_unlock_bh(&session->lock);
					mutex_unlock(&conn->xmitmutex);
					goto success;
				}
				conn->tmabort_state = TMABORT_INITIAL;
				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			}
			spin_unlock_bh(&session->lock);
		}
	}
	mutex_unlock(&conn->xmitmutex);

	/*
	 * block eh thread until:
	 *
	 * 1) abort response;
	 * 2) abort timeout;
	 * 3) session re-opened;
	 * 4) session terminated;
	 */
	for (;;) {
		int p_state = session->state;

		rc = wait_event_interruptible(conn->ehwait,
			(p_state == ISCSI_STATE_LOGGED_IN ?
			 (session->state == ISCSI_STATE_TERMINATE ||
			  conn->tmabort_state != TMABORT_INITIAL) :
			 (session->state == ISCSI_STATE_TERMINATE ||
			  session->state == ISCSI_STATE_LOGGED_IN)));
		if (rc) {
			session->state = ISCSI_STATE_TERMINATE;
			goto failed;
		}

		if (signal_pending(current))
			flush_signals(current);

		if (session->state == ISCSI_STATE_TERMINATE)
			goto failed;

		spin_lock_bh(&session->lock);
		if (sc->SCp.phase == session->age &&
		    (conn->tmabort_state == TMABORT_TIMEDOUT ||
		     conn->tmabort_state == TMABORT_FAILED)) {
			conn->tmabort_state = TMABORT_INITIAL;
			if (!ctask->sc) {
				/*
				 * ctask completed before tmf abort response or
				 * time out.
				 * But session is still ok => Happy Retry.
				 */
				spin_unlock_bh(&session->lock);
				break;
			}
			spin_unlock_bh(&session->lock);
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			continue;
		}
		spin_unlock_bh(&session->lock);
		break;
	}

success:
	debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
	rc = SUCCESS;
	goto exit;

failed:
	debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
	rc = FAILED;

exit:
	del_timer_sync(&conn->tmabort_timer);

	mutex_lock(&conn->xmitmutex);
	if (conn->sock) {
		struct sock *sk = conn->sock->sk;

		write_lock_bh(&sk->sk_callback_lock);
		iscsi_ctask_cleanup(conn, ctask);
		write_unlock_bh(&sk->sk_callback_lock);
	}
	mutex_unlock(&conn->xmitmutex);
	return rc;
}
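/*
 * Per-command R2T resources: an R2T pool sized at 4 * MaxOutstandingR2T,
 * a kfifo used as the R2T xmit queue and a mempool for Data-Out tasks.
 */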
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];

		/*
		 * pre-allocated x4 as much r2ts to handle race when
		 * target acks DataOut faster than we data_xmit() queues
		 * could replenish r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4,
				    (void***)&ctask->r2ts,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		ctask->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
			goto r2t_alloc_fail;
		}

		/*
		 * Data-Out PDU's within R2T-sequence can be quite big;
		 * allocate them from a per-task mempool.
		 */
		ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX,
				 mempool_alloc_slab, mempool_free_slab,
				 taskcache);
		if (ctask->datapool == NULL) {
			kfifo_free(ctask->r2tqueue);
			iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);
			goto r2t_alloc_fail;
		}
		INIT_LIST_HEAD(&ctask->dataqueue);
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		mempool_destroy(session->cmds[i]->datapool);
		kfifo_free(session->cmds[i]->r2tqueue);
		iscsi_pool_free(&session->cmds[i]->r2tpool,
				(void**)session->cmds[i]->r2ts);
	}
	return -ENOMEM;
}
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		mempool_destroy(session->cmds[i]->datapool);
		kfifo_free(session->cmds[i]->r2tqueue);
		iscsi_pool_free(&session->cmds[i]->r2tpool,
				(void**)session->cmds[i]->r2ts);
	}
}
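/* SCSI host template exported to the midlayer for iscsi_tcp hosts. */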
static struct scsi_host_template iscsi_sht = {
	.name			= "iSCSI Initiator over TCP/IP, v.",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= ISCSI_XMIT_CMDS_MAX - 1,
	.sg_tablesize		= ISCSI_SG_TABLESIZE,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_host_reset_handler	= iscsi_eh_host_reset,
	.use_clustering		= DISABLE_CLUSTERING,
	.proc_name		= "iscsi_tcp",
};

static struct iscsi_transport iscsi_tcp_transport;
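/*
 * Allocate a SCSI host plus iscsi_session, preformat the SCSI command
 * and management task pools with their ITTs, and allocate the per-task
 * R2T resources.
 */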
static struct iscsi_cls_session *
iscsi_session_create(struct scsi_transport_template *scsit,
		     uint32_t initial_cmdsn, uint32_t *sid)
{
	struct Scsi_Host *shost;
	struct iscsi_session *session;
	int cmd_i;

	shost = iscsi_transport_create_session(scsit, &iscsi_tcp_transport);
	if (!shost)
		return NULL;

	session = iscsi_hostdata(shost->hostdata);
	memset(session, 0, sizeof(struct iscsi_session));
	session->host = shost;
	session->state = ISCSI_STATE_FREE;
	session->mgmtpool_max = ISCSI_MGMT_CMDS_MAX;
	session->cmds_max = ISCSI_XMIT_CMDS_MAX;
	session->cmdsn = initial_cmdsn;
	session->exp_cmdsn = initial_cmdsn + 1;
	session->max_cmdsn = initial_cmdsn + 1;
	session->max_r2t = 1;
	*sid = shost->host_no;

	/* initialize SCSI PDU commands pool */
	if (iscsi_pool_init(&session->cmdpool, session->cmds_max,
			    (void***)&session->cmds,
			    sizeof(struct iscsi_cmd_task)))
		goto cmdpool_alloc_fail;

	/* pre-format cmds pool with ITT */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++)
		session->cmds[cmd_i]->itt = cmd_i;

	spin_lock_init(&session->lock);
	INIT_LIST_HEAD(&session->connections);

	/* initialize immediate command pool */
	if (iscsi_pool_init(&session->mgmtpool, session->mgmtpool_max,
			    (void***)&session->mgmt_cmds,
			    sizeof(struct iscsi_mgmt_task)))
		goto mgmtpool_alloc_fail;

	/* pre-format immediate cmds pool with ITT */
	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
		session->mgmt_cmds[cmd_i]->itt = ISCSI_MGMT_ITT_OFFSET + cmd_i;
		session->mgmt_cmds[cmd_i]->data = kmalloc(
			DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH, GFP_KERNEL);
		if (!session->mgmt_cmds[cmd_i]->data) {
			int j;

			for (j = 0; j < cmd_i; j++)
				kfree(session->mgmt_cmds[j]->data);
			goto immdata_alloc_fail;
		}
	}

	if (iscsi_r2tpool_alloc(session))
		goto r2tpool_alloc_fail;

	return hostdata_session(shost->hostdata);

r2tpool_alloc_fail:
	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
		kfree(session->mgmt_cmds[cmd_i]->data);
immdata_alloc_fail:
	iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
mgmtpool_alloc_fail:
	iscsi_pool_free(&session->cmdpool, (void**)session->cmds);
cmdpool_alloc_fail:
	iscsi_transport_destroy_session(shost);
	return NULL;
}
static void
iscsi_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);
	int cmd_i;
	struct iscsi_data_task *dtask, *n;

	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
			list_del(&dtask->item);
			mempool_free(dtask, ctask->datapool);
		}
	}

	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++)
		kfree(session->mgmt_cmds[cmd_i]->data);

	iscsi_r2tpool_free(session);
	iscsi_pool_free(&session->mgmtpool, (void**)session->mgmt_cmds);
	iscsi_pool_free(&session->cmdpool, (void**)session->cmds);

	iscsi_transport_destroy_session(shost);
}
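/*
 * Set a negotiated operational parameter on a connection or its
 * session. Parameters may only change before the connection is started
 * or while it is stopped for recovery; MaxRecvDataSegmentLength may
 * force a reallocation of the PDU receive buffer, and the digest
 * settings allocate or free the crc32c transforms.
 */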
static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		     uint32_t value)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;

	spin_lock_bh(&session->lock);
	if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
	    conn->stop_stage != STOP_CONN_RECOVER) {
		printk(KERN_ERR "iscsi_tcp: can not change parameter [%d]\n",
		       param);
		spin_unlock_bh(&session->lock);
		return 0;
	}
	spin_unlock_bh(&session->lock);

	switch(param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH: {
		char *saveptr = conn->data;
		gfp_t flags = GFP_KERNEL;

		if (conn->data_size >= value) {
			conn->max_recv_dlength = value;
			break;
		}

		spin_lock_bh(&session->lock);
		if (conn->stop_stage == STOP_CONN_RECOVER)
			flags = GFP_ATOMIC;
		spin_unlock_bh(&session->lock);

		if (value <= PAGE_SIZE)
			conn->data = kmalloc(value, flags);
		else
			conn->data = (void*)__get_free_pages(flags,
							get_order(value));
		if (conn->data == NULL) {
			conn->data = saveptr;
			return -ENOMEM;
		}
		if (conn->data_size <= PAGE_SIZE)
			kfree(saveptr);
		else
			free_pages((unsigned long)saveptr,
				   get_order(conn->data_size));
		conn->max_recv_dlength = value;
		conn->data_size = value;
		break;
	}
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		conn->max_xmit_dlength = value;
		break;
	case ISCSI_PARAM_HDRDGST_EN:
		conn->hdrdgst_en = value;
		conn->hdr_size = sizeof(struct iscsi_hdr);
		if (conn->hdrdgst_en) {
			conn->hdr_size += sizeof(__u32);
			if (!conn->tx_tfm)
				conn->tx_tfm = crypto_alloc_tfm("crc32c", 0);
			if (!conn->tx_tfm)
				return -ENOMEM;
			if (!conn->rx_tfm)
				conn->rx_tfm = crypto_alloc_tfm("crc32c", 0);
			if (!conn->rx_tfm) {
				crypto_free_tfm(conn->tx_tfm);
				return -ENOMEM;
			}
		} else {
			if (conn->tx_tfm)
				crypto_free_tfm(conn->tx_tfm);
			if (conn->rx_tfm)
				crypto_free_tfm(conn->rx_tfm);
		}
		break;
	case ISCSI_PARAM_DATADGST_EN:
		conn->datadgst_en = value;
		if (conn->datadgst_en) {
			if (!conn->data_tx_tfm)
				conn->data_tx_tfm =
					crypto_alloc_tfm("crc32c", 0);
			if (!conn->data_tx_tfm)
				return -ENOMEM;
			if (!conn->data_rx_tfm)
				conn->data_rx_tfm =
					crypto_alloc_tfm("crc32c", 0);
			if (!conn->data_rx_tfm) {
				crypto_free_tfm(conn->data_tx_tfm);
				return -ENOMEM;
			}
		} else {
			if (conn->data_tx_tfm)
				crypto_free_tfm(conn->data_tx_tfm);
			if (conn->data_rx_tfm)
				crypto_free_tfm(conn->data_rx_tfm);
		}
		conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_INITIAL_R2T_EN:
		session->initial_r2t_en = value;
		break;
	case ISCSI_PARAM_MAX_R2T:
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
		session->max_r2t = value;
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t =
				roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	case ISCSI_PARAM_IMM_DATA_EN:
		session->imm_data_en = value;
		break;
	case ISCSI_PARAM_FIRST_BURST:
		session->first_burst = value;
		break;
	case ISCSI_PARAM_MAX_BURST:
		session->max_burst = value;
		break;
	case ISCSI_PARAM_PDU_INORDER_EN:
		session->pdu_inorder_en = value;
		break;
	case ISCSI_PARAM_DATASEQ_INORDER_EN:
		session->dataseq_inorder_en = value;
		break;
	case ISCSI_PARAM_ERL:
		session->erl = value;
		break;
	case ISCSI_PARAM_IFMARKER_EN:
		session->ifmarker_en = value;
		break;
	case ISCSI_PARAM_OFMARKER_EN:
		session->ofmarker_en = value;
		break;
	default:
		break;
	}

	return 0;
}
static int
iscsi_session_get_param(struct iscsi_cls_session *cls_session,
			enum iscsi_param param, uint32_t *value)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
	struct iscsi_session *session = iscsi_hostdata(shost->hostdata);

	switch(param) {
	case ISCSI_PARAM_INITIAL_R2T_EN:
		*value = session->initial_r2t_en;
		break;
	case ISCSI_PARAM_MAX_R2T:
		*value = session->max_r2t;
		break;
	case ISCSI_PARAM_IMM_DATA_EN:
		*value = session->imm_data_en;
		break;
	case ISCSI_PARAM_FIRST_BURST:
		*value = session->first_burst;
		break;
	case ISCSI_PARAM_MAX_BURST:
		*value = session->max_burst;
		break;
	case ISCSI_PARAM_PDU_INORDER_EN:
		*value = session->pdu_inorder_en;
		break;
	case ISCSI_PARAM_DATASEQ_INORDER_EN:
		*value = session->dataseq_inorder_en;
		break;
	case ISCSI_PARAM_ERL:
		*value = session->erl;
		break;
	case ISCSI_PARAM_IFMARKER_EN:
		*value = session->ifmarker_en;
		break;
	case ISCSI_PARAM_OFMARKER_EN:
		*value = session->ofmarker_en;
		break;
	default:
		return ISCSI_ERR_PARAM_NOT_FOUND;
	}

	return 0;
}
static int
iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
		     enum iscsi_param param, uint32_t *value)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	switch(param) {
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		*value = conn->max_recv_dlength;
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		*value = conn->max_xmit_dlength;
		break;
	case ISCSI_PARAM_HDRDGST_EN:
		*value = conn->hdrdgst_en;
		break;
	case ISCSI_PARAM_DATADGST_EN:
		*value = conn->datadgst_en;
		break;
	default:
		return ISCSI_ERR_PARAM_NOT_FOUND;
	}

	return 0;
}
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn,
		     struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}
static int
iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
		    char *data, uint32_t data_size)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	int rc;

	mutex_lock(&conn->xmitmutex);
	rc = iscsi_conn_send_generic(conn, hdr, data, data_size);
	mutex_unlock(&conn->xmitmutex);

	return rc;
}
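/*
 * iSCSI/TCP transport template registered with the iSCSI transport
 * class; it wires the session/connection management callbacks above
 * into the userspace control plane.
 */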
static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	.host_template		= &iscsi_sht,
	.hostdata_size		= sizeof(struct iscsi_session),
	.conndata_size		= sizeof(struct iscsi_conn),
	.max_cmd_len		= ISCSI_TCP_MAX_CMD_LEN,
	.create_session		= iscsi_session_create,
	.destroy_session	= iscsi_session_destroy,
	.create_conn		= iscsi_conn_create,
	.bind_conn		= iscsi_conn_bind,
	.destroy_conn		= iscsi_conn_destroy,
	.set_param		= iscsi_conn_set_param,
	.get_conn_param		= iscsi_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_conn_stop,
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
};
static int __init
iscsi_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}
	iscsi_tcp_transport.max_lun = iscsi_max_lun;

	taskcache = kmem_cache_create("iscsi_taskcache",
				      sizeof(struct iscsi_data_task), 0,
				      SLAB_HWCACHE_ALIGN | SLAB_NO_REAP,
				      NULL, NULL);
	if (!taskcache)
		return -ENOMEM;

	if (!iscsi_register_transport(&iscsi_tcp_transport)) {
		kmem_cache_destroy(taskcache);
		return -ENODEV;
	}

	return 0;
}
static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
	kmem_cache_destroy(taskcache);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);