/*
 * iSCSI Initiator over TCP/IP Data-Path
 *
 * Copyright (C) 2004 Dmitry Yusupov
 * Copyright (C) 2004 Alex Aizman
 * Copyright (C) 2005 - 2006 Mike Christie
 * Copyright (C) 2006 Red Hat, Inc.  All rights reserved.
 * maintained by open-iscsi@googlegroups.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * See the file COPYING included with this distribution for more details.
 */
#include <linux/types.h>
#include <linux/list.h>
#include <linux/inet.h>
#include <linux/blkdev.h>
#include <linux/crypto.h>
#include <linux/delay.h>
#include <linux/kfifo.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport_iscsi.h>

#include "iscsi_tcp.h"
MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
	      "Alex Aizman <itn780@yahoo.com>");
MODULE_DESCRIPTION("iSCSI/TCP data-path");
MODULE_LICENSE("GPL");

/* #define DEBUG_TCP */

#ifdef DEBUG_TCP
#define debug_tcp(fmt...) printk(KERN_INFO "tcp: " fmt)
#else
#define debug_tcp(fmt...)
#endif
static unsigned int iscsi_max_lun = 512;
module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
static inline void
iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
{
	ibuf->sg.page = virt_to_page(vbuf);
	ibuf->sg.offset = offset_in_page(vbuf);
	ibuf->sg.length = size;
	ibuf->sent = 0;
	ibuf->use_sendmsg = 1;
}
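/*
 * Note: buffers set up with iscsi_buf_init_iov() (headers, pad bytes,
 * digests) always take the use_sendmsg path, so iscsi_send() pushes them
 * with sock_no_sendpage() instead of the zero-copy sendpage callback.
 */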
static inline void
iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
{
	ibuf->sg.page = sg->page;
	ibuf->sg.offset = sg->offset;
	ibuf->sg.length = sg->length;
	/*
	 * Fastpath: sg element fits into single page
	 */
	if (sg->length + sg->offset <= PAGE_SIZE && !PageSlab(sg->page))
		ibuf->use_sendmsg = 0;
	else
		ibuf->use_sendmsg = 1;
	ibuf->sent = 0;
}
static inline int
iscsi_buf_left(struct iscsi_buf *ibuf)
{
	int rc;

	rc = ibuf->sg.length - ibuf->sent;
	BUG_ON(rc < 0);
	return rc;
}
static inline void
iscsi_hdr_digest(struct iscsi_conn *conn, struct iscsi_buf *buf,
		 u8* crc)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	crypto_hash_digest(&tcp_conn->tx_hash, &buf->sg, buf->sg.length, crc);
	buf->sg.length = tcp_conn->hdr_size;
}
static int
iscsi_hdr_extract(struct iscsi_tcp_conn *tcp_conn)
{
	struct sk_buff *skb = tcp_conn->in.skb;

	tcp_conn->in.zero_copy_hdr = 0;

	if (tcp_conn->in.copy >= tcp_conn->hdr_size &&
	    tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
		/*
		 * Zero-copy PDU Header: using connection context
		 * to store header pointer.
		 */
		if (skb_shinfo(skb)->frag_list == NULL &&
		    !skb_shinfo(skb)->nr_frags) {
			tcp_conn->in.hdr = (struct iscsi_hdr *)
				((char*)skb->data + tcp_conn->in.offset);
			tcp_conn->in.zero_copy_hdr = 1;
		} else {
			/* ignoring return code since we checked
			 * in.copy before */
			skb_copy_bits(skb, tcp_conn->in.offset,
				&tcp_conn->hdr, tcp_conn->hdr_size);
			tcp_conn->in.hdr = &tcp_conn->hdr;
		}
		tcp_conn->in.offset += tcp_conn->hdr_size;
		tcp_conn->in.copy -= tcp_conn->hdr_size;
	} else {
		int hdr_remains;
		int copylen;

		/*
		 * PDU header scattered across SKB's,
		 * copying it... This'll happen quite rarely.
		 */
		if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER)
			tcp_conn->in.hdr_offset = 0;

		hdr_remains = tcp_conn->hdr_size - tcp_conn->in.hdr_offset;
		BUG_ON(hdr_remains <= 0);

		copylen = min(tcp_conn->in.copy, hdr_remains);
		skb_copy_bits(skb, tcp_conn->in.offset,
			(char*)&tcp_conn->hdr + tcp_conn->in.hdr_offset,
			copylen);

		debug_tcp("PDU gather offset %d bytes %d in.offset %d "
			  "in.copy %d\n", tcp_conn->in.hdr_offset, copylen,
			  tcp_conn->in.offset, tcp_conn->in.copy);

		tcp_conn->in.offset += copylen;
		tcp_conn->in.copy -= copylen;
		if (copylen < hdr_remains) {
			tcp_conn->in_progress = IN_PROGRESS_HEADER_GATHER;
			tcp_conn->in.hdr_offset += copylen;
			return -EAGAIN;
		}
		tcp_conn->in.hdr = &tcp_conn->hdr;
		tcp_conn->discontiguous_hdr_cnt++;
		tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	return 0;
}
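/*
 * Receive-side states, as used by the handlers below: WAIT_HEADER and
 * HEADER_GATHER assemble a PDU header (possibly across skbs),
 * DATA_RECV consumes the data segment, and DDIGEST_RECV picks up the
 * trailing data digest when DataDigest has been negotiated.
 */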
/*
 * must be called with session lock
 */
static void
iscsi_tcp_cleanup_ctask(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_r2t_info *r2t;
	struct scsi_cmnd *sc;

	/* flush ctask's r2t queues */
	while (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		debug_scsi("iscsi_tcp_cleanup_ctask pending r2t dropped\n");
	}

	sc = ctask->sc;
	if (unlikely(!sc))
		return;

	tcp_ctask->xmstate = XMSTATE_IDLE;
	tcp_ctask->r2t = NULL;
}
/**
 * iscsi_data_rsp - SCSI Data-In Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	int rc;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)tcp_conn->in.hdr;
	struct iscsi_session *session = conn->session;
	int datasn = be32_to_cpu(rhdr->datasn);

	rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin *)rhdr);
	if (rc)
		return rc;
	/*
	 * setup Data-In byte counter (gets decremented..)
	 */
	ctask->data_count = tcp_conn->in.datalen;

	if (tcp_conn->in.datalen == 0)
		return 0;

	if (ctask->datasn != datasn)
		return ISCSI_ERR_DATASN;

	ctask->datasn++;

	tcp_ctask->data_offset = be32_to_cpu(rhdr->offset);
	if (tcp_ctask->data_offset + tcp_conn->in.datalen > ctask->total_length)
		return ISCSI_ERR_DATA_OFFSET;

	if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
		struct scsi_cmnd *sc = ctask->sc;

		conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
		if (rhdr->flags & ISCSI_FLAG_DATA_UNDERFLOW) {
			int res_count = be32_to_cpu(rhdr->residual_count);

			if (res_count > 0 &&
			    res_count <= sc->request_bufflen) {
				sc->resid = res_count;
				sc->result = (DID_OK << 16) | rhdr->cmd_status;
			} else
				sc->result = (DID_BAD_TARGET << 16) |
					rhdr->cmd_status;
		} else if (rhdr->flags & ISCSI_FLAG_DATA_OVERFLOW) {
			sc->resid = be32_to_cpu(rhdr->residual_count);
			sc->result = (DID_OK << 16) | rhdr->cmd_status;
		} else
			sc->result = (DID_OK << 16) | rhdr->cmd_status;
	}

	conn->datain_pdus_cnt++;
	return 0;
}
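/*
 * The underflow/overflow handling above follows RFC 3720 residual
 * reporting: residual_count from the final Data-In PDU is propagated to
 * sc->resid so the SCSI midlayer knows how many bytes were not moved.
 */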
/**
 * iscsi_solicit_data_init - initialize first Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 *
 * Notes:
 *	Initialize first Data-Out within this R2T sequence and find
 *	the proper data_offset within this SCSI command.
 *
 *	This function is called with connection lock taken.
 **/
static void
iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_r2t_info *r2t)
{
	struct iscsi_data *hdr;
	struct scsi_cmnd *sc = ctask->sc;

	hdr = &r2t->dtask.hdr;
	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
	r2t->solicit_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
	hdr->itt = ctask->hdr->itt;
	hdr->exp_statsn = r2t->exp_statsn;
	hdr->offset = cpu_to_be32(r2t->data_offset);
	if (r2t->data_length > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
		hdr->flags = 0;
	} else {
		hton24(hdr->dlength, r2t->data_length);
		r2t->data_count = r2t->data_length;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;

	r2t->sent = 0;

	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
			   sizeof(struct iscsi_hdr));

	if (sc->use_sg) {
		int i, sg_count = 0;
		struct scatterlist *sg = sc->request_buffer;

		r2t->sg = NULL;
		for (i = 0; i < sc->use_sg; i++, sg += 1) {
			/* FIXME: prefetch ? */
			if (sg_count + sg->length > r2t->data_offset) {
				int page_offset;

				/* offset within this page */
				page_offset = r2t->data_offset - sg_count;

				/* fill in this buffer */
				iscsi_buf_init_sg(&r2t->sendbuf, sg);
				r2t->sendbuf.sg.offset += page_offset;
				r2t->sendbuf.sg.length -= page_offset;

				/* xmit logic will continue with next one */
				r2t->sg = sg + 1;
				break;
			}
			sg_count += sg->length;
		}
		BUG_ON(r2t->sg == NULL);
	} else {
		iscsi_buf_init_iov(&r2t->sendbuf,
			    (char*)sc->request_buffer + r2t->data_offset,
			    r2t->data_count);
		r2t->sg = NULL;
	}
}
/**
 * iscsi_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static int
iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_r2t_info *r2t;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
	int r2tsn = be32_to_cpu(rhdr->r2tsn);
	int rc;

	if (tcp_conn->in.datalen) {
		printk(KERN_ERR "iscsi_tcp: invalid R2t with datalen %d\n",
		       tcp_conn->in.datalen);
		return ISCSI_ERR_DATALEN;
	}

	if (tcp_ctask->exp_r2tsn && tcp_ctask->exp_r2tsn != r2tsn)
		return ISCSI_ERR_R2TSN;

	rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin *)rhdr);
	if (rc)
		return rc;

	/* FIXME: use R2TSN to detect missing R2T */

	/* fill-in new R2T associated with the task */
	spin_lock(&session->lock);
	if (!ctask->sc || ctask->mtask ||
	    session->state != ISCSI_STATE_LOGGED_IN) {
		printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
		       "recovery...\n", ctask->itt);
		spin_unlock(&session->lock);
		return 0;
	}

	rc = __kfifo_get(tcp_ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
	BUG_ON(!rc);

	r2t->exp_statsn = rhdr->statsn;
	r2t->data_length = be32_to_cpu(rhdr->data_length);
	if (r2t->data_length == 0) {
		printk(KERN_ERR "iscsi_tcp: invalid R2T with zero data len\n");
		spin_unlock(&session->lock);
		return ISCSI_ERR_DATALEN;
	}

	if (r2t->data_length > session->max_burst)
		debug_scsi("invalid R2T with data len %u and max burst %u."
			   "Attempting to execute request.\n",
			   r2t->data_length, session->max_burst);

	r2t->data_offset = be32_to_cpu(rhdr->data_offset);
	if (r2t->data_offset + r2t->data_length > ctask->total_length) {
		spin_unlock(&session->lock);
		printk(KERN_ERR "iscsi_tcp: invalid R2T with data len %u at "
		       "offset %u and total length %d\n", r2t->data_length,
		       r2t->data_offset, ctask->total_length);
		return ISCSI_ERR_DATALEN;
	}

	r2t->ttt = rhdr->ttt; /* no flip */
	r2t->solicit_datasn = 0;

	iscsi_solicit_data_init(conn, ctask, r2t);

	tcp_ctask->exp_r2tsn = r2tsn + 1;
	__kfifo_put(tcp_ctask->r2tqueue, (void*)&r2t, sizeof(void*));
	tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
	list_move_tail(&ctask->running, &conn->xmitqueue);

	scsi_queue_work(session->host, &conn->xmitwork);
	conn->r2t_pdus_cnt++;
	spin_unlock(&session->lock);

	return 0;
}
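/*
 * In short: each R2T from the target is turned into an iscsi_r2t_info
 * taken from the per-task pool, queued on r2tqueue, and XMSTATE_SOL_HDR
 * tells the xmit worker to start emitting solicited Data-Out PDUs for it
 * (see iscsi_send_sol_pdu() below).
 */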
static int
iscsi_tcp_hdr_recv(struct iscsi_conn *conn)
{
	int rc = 0, opcode, ahslen;
	struct iscsi_hdr *hdr;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	uint32_t cdgst, rdgst = 0, itt;

	hdr = tcp_conn->in.hdr;

	/* verify PDU length */
	tcp_conn->in.datalen = ntoh24(hdr->dlength);
	if (tcp_conn->in.datalen > conn->max_recv_dlength) {
		printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
		       tcp_conn->in.datalen, conn->max_recv_dlength);
		return ISCSI_ERR_DATALEN;
	}
	tcp_conn->data_copied = 0;

	/* read AHS */
	ahslen = hdr->hlength << 2;
	tcp_conn->in.offset += ahslen;
	tcp_conn->in.copy -= ahslen;
	if (tcp_conn->in.copy < 0) {
		printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
		       "%d bytes\n", ahslen);
		return ISCSI_ERR_AHSLEN;
	}

	/* calculate read padding */
	tcp_conn->in.padding = tcp_conn->in.datalen & (ISCSI_PAD_LEN-1);
	if (tcp_conn->in.padding) {
		tcp_conn->in.padding = ISCSI_PAD_LEN - tcp_conn->in.padding;
		debug_scsi("read padding %d bytes\n", tcp_conn->in.padding);
	}

	if (conn->hdrdgst_en) {
		struct scatterlist sg;

		sg_init_one(&sg, (u8 *)hdr,
			    sizeof(struct iscsi_hdr) + ahslen);
		crypto_hash_digest(&tcp_conn->rx_hash, &sg, sg.length,
				   (u8 *)&cdgst);
		rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
				     ahslen);
		if (cdgst != rdgst) {
			printk(KERN_ERR "iscsi_tcp: hdrdgst error "
			       "recv 0x%x calc 0x%x\n", rdgst, cdgst);
			return ISCSI_ERR_HDR_DGST;
		}
	}

	opcode = hdr->opcode & ISCSI_OPCODE_MASK;
	/* verify itt (itt encoding: age+cid+itt) */
	rc = iscsi_verify_itt(conn, hdr, &itt);
	if (rc == ISCSI_ERR_NO_SCSI_CMD) {
		tcp_conn->in.datalen = 0; /* force drop */
		return 0;
	} else if (rc)
		return rc;

	debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
		  opcode, tcp_conn->in.offset, tcp_conn->in.copy,
		  ahslen, tcp_conn->in.datalen);

	switch(opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		tcp_conn->in.ctask = session->cmds[itt];
		rc = iscsi_data_rsp(conn, tcp_conn->in.ctask);
		if (rc)
			return rc;
		/* fall through */
	case ISCSI_OP_SCSI_CMD_RSP:
		tcp_conn->in.ctask = session->cmds[itt];
		if (tcp_conn->in.datalen)
			goto copy_hdr;

		spin_lock(&session->lock);
		rc = __iscsi_complete_pdu(conn, hdr, NULL, 0);
		spin_unlock(&session->lock);
		break;
	case ISCSI_OP_R2T:
		tcp_conn->in.ctask = session->cmds[itt];
		if (ahslen)
			rc = ISCSI_ERR_AHSLEN;
		else if (tcp_conn->in.ctask->sc->sc_data_direction ==
								DMA_TO_DEVICE)
			rc = iscsi_r2t_rsp(conn, tcp_conn->in.ctask);
		else
			rc = ISCSI_ERR_PROTO;
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_REJECT:
	case ISCSI_OP_ASYNC_EVENT:
		/*
		 * It is possible that we could get a PDU with a buffer larger
		 * than 8K, but there are no targets that currently do this.
		 * For now we fail until we find a vendor that needs it
		 */
		if (ISCSI_DEF_MAX_RECV_SEG_LEN <
		    tcp_conn->in.datalen) {
			printk(KERN_ERR "iscsi_tcp: received buffer of len %u "
			       "but conn buffer is only %u (opcode %0x)\n",
			       tcp_conn->in.datalen,
			       ISCSI_DEF_MAX_RECV_SEG_LEN, opcode);
			rc = ISCSI_ERR_PROTO;
			break;
		}

		if (tcp_conn->in.datalen)
			goto copy_hdr;
	/* fall through */
	case ISCSI_OP_LOGOUT_RSP:
	case ISCSI_OP_NOOP_IN:
	case ISCSI_OP_SCSI_TMFUNC_RSP:
		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
		break;
	default:
		rc = ISCSI_ERR_BAD_OPCODE;
		break;
	}

	return rc;

copy_hdr:
	/*
	 * if we did zero copy for the header but we will need multiple
	 * skbs to complete the command then we have to copy the header
	 * for later use
	 */
	if (tcp_conn->in.zero_copy_hdr && tcp_conn->in.copy <=
	   (tcp_conn->in.datalen + tcp_conn->in.padding +
	    (conn->datadgst_en ? 4 : 0))) {
		debug_tcp("Copying header for later use. in.copy %d in.datalen"
			  " %d\n", tcp_conn->in.copy, tcp_conn->in.datalen);
		memcpy(&tcp_conn->hdr, tcp_conn->in.hdr,
		       sizeof(struct iscsi_hdr));
		tcp_conn->in.hdr = &tcp_conn->hdr;
		tcp_conn->in.zero_copy_hdr = 0;
	}
	return 0;
}
/**
 * iscsi_ctask_copy - copy skb bits to the destination cmd task
 * @conn: iscsi tcp connection
 * @ctask: scsi command task
 * @buf: buffer to copy to
 * @buf_size: size of buffer
 * @offset: offset within the buffer
 *
 * Notes:
 *	The function calls skb_copy_bits() and updates per-connection and
 *	per-cmd byte counters.
 *
 *	Read counters (in bytes):
 *
 *	conn->in.offset		offset within in progress SKB
 *	conn->in.copy		left to copy from in progress SKB
 *				including padding
 *	conn->in.copied		copied already from in progress SKB
 *	conn->data_copied	copied already from in progress buffer
 *	ctask->sent		total bytes sent up to the MidLayer
 *	ctask->data_count	left to copy from in progress Data-In
 *	buf_left		left to copy from in progress buffer
 **/
static inline int
iscsi_ctask_copy(struct iscsi_tcp_conn *tcp_conn, struct iscsi_cmd_task *ctask,
		void *buf, int buf_size, int offset)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int buf_left = buf_size - (tcp_conn->data_copied + offset);
	int size = min(tcp_conn->in.copy, buf_left);
	int rc;

	size = min(size, ctask->data_count);

	debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
		  size, tcp_conn->in.offset, tcp_conn->in.copied);

	BUG_ON(tcp_ctask->sent + size > ctask->total_length);

	rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
			   (char*)buf + (offset + tcp_conn->data_copied), size);
	/* must fit into skb->len */
	BUG_ON(rc);

	tcp_conn->in.offset += size;
	tcp_conn->in.copy -= size;
	tcp_conn->in.copied += size;
	tcp_conn->data_copied += size;
	tcp_ctask->sent += size;
	ctask->data_count -= size;

	BUG_ON(tcp_conn->in.copy < 0);
	BUG_ON(ctask->data_count < 0);

	if (buf_size != (tcp_conn->data_copied + offset)) {
		if (!ctask->data_count) {
			BUG_ON(buf_size - tcp_conn->data_copied < 0);
			/* done with this PDU */
			return buf_size - tcp_conn->data_copied;
		}
		return -EAGAIN;
	}

	/* done with this buffer or with both - PDU and buffer */
	tcp_conn->data_copied = 0;
	return 0;
}
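/*
 * iscsi_ctask_copy() above feeds Data-In payload straight into the SCSI
 * command's buffer; iscsi_tcp_copy() below gathers control-PDU payloads
 * (login, text, reject, ...) into the connection's conn->data buffer.
 */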
/**
 * iscsi_tcp_copy - copy skb bits to the destination buffer
 * @conn: iscsi tcp connection
 *
 * Notes:
 *	The function calls skb_copy_bits() and updates per-connection
 *	byte counters.
 **/
static inline int
iscsi_tcp_copy(struct iscsi_conn *conn, int buf_size)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int buf_left = buf_size - tcp_conn->data_copied;
	int size = min(tcp_conn->in.copy, buf_left);
	int rc;

	debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
		  size, tcp_conn->in.offset, tcp_conn->data_copied);

	rc = skb_copy_bits(tcp_conn->in.skb, tcp_conn->in.offset,
			   (char*)conn->data + tcp_conn->data_copied, size);
	BUG_ON(rc);

	tcp_conn->in.offset += size;
	tcp_conn->in.copy -= size;
	tcp_conn->in.copied += size;
	tcp_conn->data_copied += size;

	if (buf_size != tcp_conn->data_copied)
		return -EAGAIN;

	return 0;
}
static inline void
partial_sg_digest_update(struct hash_desc *desc, struct scatterlist *sg,
			 int offset, int length)
{
	struct scatterlist temp;

	memcpy(&temp, sg, sizeof(struct scatterlist));
	temp.offset = offset;
	temp.length = length;
	crypto_hash_update(desc, &temp, length);
}
static inline void
iscsi_recv_digest_update(struct iscsi_tcp_conn *tcp_conn, char* buf, int len)
{
	struct scatterlist tmp;

	sg_init_one(&tmp, buf, len);
	crypto_hash_update(&tcp_conn->rx_hash, &tmp, len);
}
static int iscsi_scsi_data_in(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_cmd_task *ctask = tcp_conn->in.ctask;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct scsi_cmnd *sc = ctask->sc;
	struct scatterlist *sg;
	int i, offset, rc = 0;

	BUG_ON((void*)ctask != sc->SCp.ptr);

	/*
	 * copying Data-In into the Scsi_Cmnd
	 */
	if (!sc->use_sg) {
		i = ctask->data_count;
		rc = iscsi_ctask_copy(tcp_conn, ctask, sc->request_buffer,
				      sc->request_bufflen,
				      tcp_ctask->data_offset);
		if (rc == -EAGAIN)
			return rc;
		if (conn->datadgst_en)
			iscsi_recv_digest_update(tcp_conn, sc->request_buffer,
						 i);
		rc = 0;
		goto done;
	}

	offset = tcp_ctask->data_offset;
	sg = sc->request_buffer;

	if (tcp_ctask->data_offset)
		for (i = 0; i < tcp_ctask->sg_count; i++)
			offset -= sg[i].length;
	/* we've passed through partial sg*/
	if (offset < 0)
		offset = 0;

	for (i = tcp_ctask->sg_count; i < sc->use_sg; i++) {
		char *dest;

		dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
		rc = iscsi_ctask_copy(tcp_conn, ctask, dest + sg[i].offset,
				      sg[i].length, offset);
		kunmap_atomic(dest, KM_SOFTIRQ0);
		if (rc == -EAGAIN)
			/* continue with the next SKB/PDU */
			return rc;
		if (!rc) {
			if (conn->datadgst_en) {
				if (!offset)
					crypto_hash_update(
							&tcp_conn->rx_hash,
							&sg[i], sg[i].length);
				else
					partial_sg_digest_update(
							&tcp_conn->rx_hash,
							&sg[i],
							sg[i].offset + offset,
							sg[i].length - offset);
			}
			offset = 0;
			tcp_ctask->sg_count++;
		}

		if (!ctask->data_count) {
			if (rc && conn->datadgst_en)
				/*
				 * data-in is complete, but buffer not...
				 */
				partial_sg_digest_update(&tcp_conn->rx_hash,
						&sg[i],
						sg[i].offset, sg[i].length-rc);
			rc = 0;
			break;
		}

		if (!tcp_conn->in.copy)
			return -EAGAIN;
	}
	BUG_ON(ctask->data_count);

done:
	/* check for non-exceptional status */
	if (tcp_conn->in.hdr->flags & ISCSI_FLAG_DATA_STATUS) {
		debug_scsi("done [sc %lx res %d itt 0x%x flags 0x%x]\n",
			   (long)sc, sc->result, ctask->itt,
			   tcp_conn->in.hdr->flags);
		spin_lock(&conn->session->lock);
		__iscsi_complete_pdu(conn, tcp_conn->in.hdr, NULL, 0);
		spin_unlock(&conn->session->lock);
	}

	return rc;
}
static int
iscsi_data_recv(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc = 0, opcode;

	opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;
	switch (opcode) {
	case ISCSI_OP_SCSI_DATA_IN:
		rc = iscsi_scsi_data_in(conn);
		break;
	case ISCSI_OP_SCSI_CMD_RSP:
	case ISCSI_OP_TEXT_RSP:
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_ASYNC_EVENT:
	case ISCSI_OP_REJECT:
		/*
		 * Collect data segment to the connection's data
		 * placeholder
		 */
		if (iscsi_tcp_copy(conn, tcp_conn->in.datalen)) {
			rc = -EAGAIN;
			goto exit;
		}

		rc = iscsi_complete_pdu(conn, tcp_conn->in.hdr, conn->data,
					tcp_conn->in.datalen);
		if (!rc && conn->datadgst_en && opcode != ISCSI_OP_LOGIN_RSP)
			iscsi_recv_digest_update(tcp_conn, conn->data,
						 tcp_conn->in.datalen);
		break;
	default:
		BUG_ON(1);
	}
exit:
	return rc;
}
/**
 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
 * @rd_desc: read descriptor
 * @skb: socket buffer
 * @offset: offset in skb
 * @len: skb->len - offset
 **/
static int
iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
		unsigned int offset, size_t len)
{
	int rc;
	struct iscsi_conn *conn = rd_desc->arg.data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int processed;
	char pad[ISCSI_PAD_LEN];
	struct scatterlist sg;

	/*
	 * Save current SKB and its offset in the corresponding
	 * connection context.
	 */
	tcp_conn->in.copy = skb->len - offset;
	tcp_conn->in.offset = offset;
	tcp_conn->in.skb = skb;
	tcp_conn->in.len = tcp_conn->in.copy;
	BUG_ON(tcp_conn->in.copy <= 0);
	debug_tcp("in %d bytes\n", tcp_conn->in.copy);

more:
	tcp_conn->in.copied = 0;
	rc = 0;

	if (unlikely(conn->suspend_rx)) {
		debug_tcp("conn %d Rx suspended!\n", conn->id);
		return 0;
	}

	if (tcp_conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
	    tcp_conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
		rc = iscsi_hdr_extract(tcp_conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto nomore;
			else {
				iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
				return 0;
			}
		}

		/*
		 * Verify and process incoming PDU header.
		 */
		rc = iscsi_tcp_hdr_recv(conn);
		if (!rc && tcp_conn->in.datalen) {
			if (conn->datadgst_en)
				crypto_hash_init(&tcp_conn->rx_hash);
			tcp_conn->in_progress = IN_PROGRESS_DATA_RECV;
		} else if (rc) {
			iscsi_conn_failure(conn, rc);
			return 0;
		}
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
		uint32_t recv_digest;

		debug_tcp("extra data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);
		rc = iscsi_tcp_copy(conn, sizeof(uint32_t));
		if (rc) {
			if (rc == -EAGAIN)
				goto again;
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}

		memcpy(&recv_digest, conn->data, sizeof(uint32_t));
		if (recv_digest != tcp_conn->in.datadgst) {
			debug_tcp("iscsi_tcp: data digest error!"
				  "0x%x != 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
			return 0;
		} else {
			debug_tcp("iscsi_tcp: data digest match!"
				  "0x%x == 0x%x\n", recv_digest,
				  tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
		}
	}

	if (tcp_conn->in_progress == IN_PROGRESS_DATA_RECV &&
	    tcp_conn->in.copy) {

		debug_tcp("data_recv offset %d copy %d\n",
			  tcp_conn->in.offset, tcp_conn->in.copy);

		rc = iscsi_data_recv(conn);
		if (rc) {
			if (rc == -EAGAIN)
				goto again;
			iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
			return 0;
		}
		tcp_conn->in.copy -= tcp_conn->in.padding;
		tcp_conn->in.offset += tcp_conn->in.padding;
		if (conn->datadgst_en) {
			if (tcp_conn->in.padding) {
				debug_tcp("padding -> %d\n",
					  tcp_conn->in.padding);
				memset(pad, 0, tcp_conn->in.padding);
				sg_init_one(&sg, pad, tcp_conn->in.padding);
				crypto_hash_update(&tcp_conn->rx_hash,
						   &sg, sg.length);
			}
			crypto_hash_final(&tcp_conn->rx_hash,
					  (u8 *) &tcp_conn->in.datadgst);
			debug_tcp("rx digest 0x%x\n", tcp_conn->in.datadgst);
			tcp_conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
			tcp_conn->data_copied = 0;
		} else
			tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;
	}

	debug_tcp("f, processed %d from out of %d padding %d\n",
		  tcp_conn->in.offset - offset, (int)len, tcp_conn->in.padding);
	BUG_ON(tcp_conn->in.offset - offset > len);

	if (tcp_conn->in.offset - offset != len) {
		debug_tcp("continue to process %d bytes\n",
			  (int)len - (tcp_conn->in.offset - offset));
		goto more;
	}

nomore:
	processed = tcp_conn->in.offset - offset;
	BUG_ON(processed == 0);
	return processed;

again:
	processed = tcp_conn->in.offset - offset;
	debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
		  processed, (int)len, (int)rd_desc->count);
	BUG_ON(processed == 0);
	BUG_ON(processed > len);

	conn->rxdata_octets += processed;
	return processed;
}
static void
iscsi_tcp_data_ready(struct sock *sk, int flag)
{
	struct iscsi_conn *conn = sk->sk_user_data;
	read_descriptor_t rd_desc;

	read_lock(&sk->sk_callback_lock);

	/*
	 * Use rd_desc to pass 'conn' to iscsi_tcp_data_recv.
	 * We set count to 1 because we want the network layer to
	 * hand us all the skbs that are available. iscsi_tcp_data_recv
	 * handles pdus that cross buffers or pdus that still need data.
	 */
	rd_desc.arg.data = conn;
	rd_desc.count = 1;
	tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);

	read_unlock(&sk->sk_callback_lock);
}
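/*
 * tcp_read_sock() invokes iscsi_tcp_data_recv() for each available skb,
 * so the whole receive path above runs from the socket's data_ready
 * callback (softirq context, under sk_callback_lock); presumably this is
 * also why the bind code below puts the socket into GFP_ATOMIC mode.
 */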
static void
iscsi_tcp_state_change(struct sock *sk)
{
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_conn *conn;
	struct iscsi_session *session;
	void (*old_state_change)(struct sock *);

	read_lock(&sk->sk_callback_lock);

	conn = (struct iscsi_conn*)sk->sk_user_data;
	session = conn->session;

	if ((sk->sk_state == TCP_CLOSE_WAIT ||
	     sk->sk_state == TCP_CLOSE) &&
	    !atomic_read(&sk->sk_rmem_alloc)) {
		debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}

	tcp_conn = conn->dd_data;
	old_state_change = tcp_conn->old_state_change;

	read_unlock(&sk->sk_callback_lock);

	old_state_change(sk);
}
/**
 * iscsi_write_space - Called when more output buffer space is available
 * @sk: socket space is available for
 **/
static void
iscsi_write_space(struct sock *sk)
{
	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	tcp_conn->old_write_space(sk);
	debug_tcp("iscsi_write_space: cid %d\n", conn->id);
	scsi_queue_work(conn->session->host, &conn->xmitwork);
}
static void
iscsi_conn_set_callbacks(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk = tcp_conn->sock->sk;

	/* assign new callbacks */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = conn;
	tcp_conn->old_data_ready = sk->sk_data_ready;
	tcp_conn->old_state_change = sk->sk_state_change;
	tcp_conn->old_write_space = sk->sk_write_space;
	sk->sk_data_ready = iscsi_tcp_data_ready;
	sk->sk_state_change = iscsi_tcp_state_change;
	sk->sk_write_space = iscsi_write_space;
	write_unlock_bh(&sk->sk_callback_lock);
}
static void
iscsi_conn_restore_callbacks(struct iscsi_tcp_conn *tcp_conn)
{
	struct sock *sk = tcp_conn->sock->sk;

	/* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data    = NULL;
	sk->sk_data_ready   = tcp_conn->old_data_ready;
	sk->sk_state_change = tcp_conn->old_state_change;
	sk->sk_write_space  = tcp_conn->old_write_space;
	sk->sk_no_check     = 0;
	write_unlock_bh(&sk->sk_callback_lock);
}
/**
 * iscsi_send - generic send routine
 * @sk: kernel's socket
 * @buf: buffer to write from
 * @size: actual size to write
 * @flags: socket's flags
 */
static inline int
iscsi_send(struct iscsi_conn *conn, struct iscsi_buf *buf, int size, int flags)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct socket *sk = tcp_conn->sock;
	int offset = buf->sg.offset + buf->sent, res;

	/*
	 * if we got use_sg=0 or are sending something we kmallocd
	 * then we did not have to do kmap (kmap returns page_address)
	 *
	 * if we got use_sg > 0, but had to drop down, we do not
	 * set clustering so this should only happen for that
	 * slab case.
	 */
	if (buf->use_sendmsg)
		res = sock_no_sendpage(sk, buf->sg.page, offset, size, flags);
	else
		res = tcp_conn->sendpage(sk, buf->sg.page, offset, size, flags);

	if (res >= 0) {
		conn->txdata_octets += res;
		buf->sent += res;
		return res;
	}

	tcp_conn->sendpage_failures_cnt++;
	if (res == -EAGAIN)
		res = -ENOBUFS;
	else
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	return res;
}
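/*
 * Callers pass MSG_MORE in flags while more of the same PDU (data,
 * padding, digest) is still to follow, letting TCP coalesce the pieces
 * into fewer segments; see iscsi_sendhdr() and iscsi_sendpage() below.
 */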
/**
 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @datalen: length of data to be sent after the header
 **/
static inline int
iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int res, size;

	size = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + size > buf->sg.length);
	if (buf->sent + size != buf->sg.length || datalen)
		flags |= MSG_MORE;

	res = iscsi_send(conn, buf, size, flags);
	debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
	if (res >= 0) {
		if (size != res)
			return -EAGAIN;
		return 0;
	}

	return res;
}
/**
 * iscsi_sendpage - send one page of iSCSI Data-Out.
 * @conn: iscsi connection
 * @buf: buffer to write from
 * @count: remaining data
 * @sent: number of bytes sent
 **/
static inline int
iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
	       int *count, int *sent)
{
	int flags = 0; /* MSG_DONTWAIT; */
	int size, res;

	size = buf->sg.length - buf->sent;
	BUG_ON(buf->sent + size > buf->sg.length);
	if (size > *count)
		size = *count;
	if (buf->sent + size != buf->sg.length || *count != size)
		flags |= MSG_MORE;

	res = iscsi_send(conn, buf, size, flags);
	debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
		  size, buf->sent, *count, *sent, res);
	if (res >= 0) {
		*count -= res;
		*sent += res;
		if (size != res)
			return -EAGAIN;
		return 0;
	}

	return res;
}
static inline void
iscsi_data_digest_init(struct iscsi_tcp_conn *tcp_conn,
		       struct iscsi_tcp_cmd_task *tcp_ctask)
{
	crypto_hash_init(&tcp_conn->tx_hash);
	tcp_ctask->digest_count = 4;
}
/**
 * iscsi_solicit_data_cont - initialize next Data-Out
 * @conn: iscsi connection
 * @ctask: scsi command task
 * @left: bytes left to transfer
 *
 * Notes:
 *	Initialize next Data-Out within this R2T sequence and continue
 *	to process next Scatter-Gather element(if any) of this SCSI command.
 *
 *	Called under connection lock.
 **/
static void
iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
			struct iscsi_r2t_info *r2t, int left)
{
	struct iscsi_data *hdr;
	struct scsi_cmnd *sc = ctask->sc;
	int new_offset;

	hdr = &r2t->dtask.hdr;
	memset(hdr, 0, sizeof(struct iscsi_data));
	hdr->ttt = r2t->ttt;
	hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
	r2t->solicit_datasn++;
	hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
	memcpy(hdr->lun, ctask->hdr->lun, sizeof(hdr->lun));
	hdr->itt = ctask->hdr->itt;
	hdr->exp_statsn = r2t->exp_statsn;
	new_offset = r2t->data_offset + r2t->sent;
	hdr->offset = cpu_to_be32(new_offset);
	if (left > conn->max_xmit_dlength) {
		hton24(hdr->dlength, conn->max_xmit_dlength);
		r2t->data_count = conn->max_xmit_dlength;
	} else {
		hton24(hdr->dlength, left);
		r2t->data_count = left;
		hdr->flags = ISCSI_FLAG_CMD_FINAL;
	}
	conn->dataout_pdus_cnt++;

	iscsi_buf_init_iov(&r2t->headbuf, (char*)hdr,
			   sizeof(struct iscsi_hdr));

	if (iscsi_buf_left(&r2t->sendbuf))
		return;

	if (sc->use_sg) {
		iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
		r2t->sg += 1;
	} else
		iscsi_buf_init_iov(&r2t->sendbuf,
			    (char*)sc->request_buffer + new_offset,
			    r2t->data_count);
}
*tcp_ctask
,
1262 tcp_ctask
->pad_count
= len
& (ISCSI_PAD_LEN
- 1);
1263 if (!tcp_ctask
->pad_count
)
1266 tcp_ctask
->pad_count
= ISCSI_PAD_LEN
- tcp_ctask
->pad_count
;
1267 debug_scsi("write padding %d bytes\n", tcp_ctask
->pad_count
);
1268 tcp_ctask
->xmstate
|= XMSTATE_W_PAD
;
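/*
 * Example: with ISCSI_PAD_LEN == 4, a 10-byte segment gives
 * pad_count = 10 & 3 = 2, so 4 - 2 = 2 pad bytes are queued and
 * XMSTATE_W_PAD makes iscsi_send_padding() emit them after the data.
 */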
/**
 * iscsi_tcp_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
 * @conn: iscsi connection
 * @ctask: scsi command task
 **/
static void
iscsi_tcp_cmd_init(struct iscsi_cmd_task *ctask)
{
	struct scsi_cmnd *sc = ctask->sc;
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

	BUG_ON(__kfifo_len(tcp_ctask->r2tqueue));

	tcp_ctask->sent = 0;
	tcp_ctask->sg_count = 0;

	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		tcp_ctask->xmstate = XMSTATE_W_HDR;
		tcp_ctask->exp_r2tsn = 0;
		BUG_ON(ctask->total_length == 0);

		if (sc->use_sg) {
			struct scatterlist *sg = sc->request_buffer;

			iscsi_buf_init_sg(&tcp_ctask->sendbuf, sg);
			tcp_ctask->sg = sg + 1;
			tcp_ctask->bad_sg = sg + sc->use_sg;
		} else {
			iscsi_buf_init_iov(&tcp_ctask->sendbuf,
					   sc->request_buffer,
					   sc->request_bufflen);
			tcp_ctask->sg = NULL;
			tcp_ctask->bad_sg = NULL;
		}
		debug_scsi("cmd [itt 0x%x total %d imm_data %d "
			   "unsol count %d, unsol offset %d]\n",
			   ctask->itt, ctask->total_length, ctask->imm_count,
			   ctask->unsol_count, ctask->unsol_offset);
	} else
		tcp_ctask->xmstate = XMSTATE_R_HDR;

	iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)ctask->hdr,
			   sizeof(struct iscsi_hdr));
}
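/*
 * Write commands start in XMSTATE_W_HDR and may then pass through the
 * immediate, unsolicited and R2T-solicited data states; read commands
 * only send their header (XMSTATE_R_HDR) and then wait for Data-In.
 */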
/**
 * iscsi_tcp_mtask_xmit - xmit management(immediate) task
 * @conn: iscsi connection
 * @mtask: task management task
 *
 * Notes:
 *	The function can return -EAGAIN in which case caller must
 *	call it again later, or recover. '0' return code means successful
 *	xmit.
 *
 *	Management xmit state machine consists of two states:
 *		IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
 *		IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
 **/
static int
iscsi_tcp_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
{
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;
	int rc;

	debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
		   conn->id, tcp_mtask->xmstate, mtask->itt);

	if (tcp_mtask->xmstate & XMSTATE_IMM_HDR) {
		tcp_mtask->xmstate &= ~XMSTATE_IMM_HDR;
		if (mtask->data_count)
			tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
		if (conn->c_stage != ISCSI_CONN_INITIAL_STAGE &&
		    conn->stop_stage != STOP_CONN_RECOVER &&
		    conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_mtask->headbuf,
					 (u8*)tcp_mtask->hdrext);
		rc = iscsi_sendhdr(conn, &tcp_mtask->headbuf,
				   mtask->data_count);
		if (rc) {
			tcp_mtask->xmstate |= XMSTATE_IMM_HDR;
			if (mtask->data_count)
				tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
			return rc;
		}
	}

	if (tcp_mtask->xmstate & XMSTATE_IMM_DATA) {
		BUG_ON(!mtask->data_count);
		tcp_mtask->xmstate &= ~XMSTATE_IMM_DATA;
		/* FIXME: implement.
		 * Virtual buffer could be spread across multiple pages...
		 */
		do {
			rc = iscsi_sendpage(conn, &tcp_mtask->sendbuf,
					&mtask->data_count, &tcp_mtask->sent);
			if (rc) {
				tcp_mtask->xmstate |= XMSTATE_IMM_DATA;
				return rc;
			}
		} while (mtask->data_count);
	}

	BUG_ON(tcp_mtask->xmstate != XMSTATE_IDLE);
	if (mtask->hdr->itt == RESERVED_ITT) {
		struct iscsi_session *session = conn->session;

		spin_lock_bh(&session->lock);
		list_del(&conn->mtask->running);
		__kfifo_put(session->mgmtpool.queue, (void*)&conn->mtask,
			    sizeof(void*));
		spin_unlock_bh(&session->lock);
	}
	return 0;
}
static inline int
iscsi_send_read_hdr(struct iscsi_conn *conn,
		    struct iscsi_tcp_cmd_task *tcp_ctask)
{
	int rc;

	tcp_ctask->xmstate &= ~XMSTATE_R_HDR;
	if (conn->hdrdgst_en)
		iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
				 (u8*)tcp_ctask->hdrext);
	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, 0);
	if (!rc) {
		BUG_ON(tcp_ctask->xmstate != XMSTATE_IDLE);
		return 0; /* wait for Data-In */
	}
	tcp_ctask->xmstate |= XMSTATE_R_HDR;
	return rc;
}
static inline int
iscsi_send_write_hdr(struct iscsi_conn *conn,
		     struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;

	tcp_ctask->xmstate &= ~XMSTATE_W_HDR;
	if (conn->hdrdgst_en)
		iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
				 (u8*)tcp_ctask->hdrext);
	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->imm_count);
	if (rc) {
		tcp_ctask->xmstate |= XMSTATE_W_HDR;
		return rc;
	}

	if (ctask->imm_count) {
		tcp_ctask->xmstate |= XMSTATE_IMM_DATA;
		iscsi_set_padding(tcp_ctask, ctask->imm_count);

		if (ctask->conn->datadgst_en) {
			iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
			tcp_ctask->immdigest = 0;
		}
	}

	if (ctask->unsol_count)
		tcp_ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
	return 0;
}
static int
iscsi_send_padding(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int sent = 0, rc;

	if (tcp_ctask->xmstate & XMSTATE_W_PAD) {
		iscsi_buf_init_iov(&tcp_ctask->sendbuf, (char*)&tcp_ctask->pad,
				   tcp_ctask->pad_count);
		if (conn->datadgst_en)
			crypto_hash_update(&tcp_conn->tx_hash,
					   &tcp_ctask->sendbuf.sg,
					   tcp_ctask->sendbuf.sg.length);
	} else if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_PAD))
		return 0;

	tcp_ctask->xmstate &= ~XMSTATE_W_PAD;
	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_PAD;
	debug_scsi("sending %d pad bytes for itt 0x%x\n",
		   tcp_ctask->pad_count, ctask->itt);
	rc = iscsi_sendpage(conn, &tcp_ctask->sendbuf, &tcp_ctask->pad_count,
			    &sent);
	if (rc) {
		debug_scsi("padding send failed %d\n", rc);
		tcp_ctask->xmstate |= XMSTATE_W_RESEND_PAD;
	}
	return rc;
}
static int
iscsi_send_digest(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
		  struct iscsi_buf *buf, uint32_t *digest)
{
	struct iscsi_tcp_cmd_task *tcp_ctask;
	struct iscsi_tcp_conn *tcp_conn;
	int rc, sent = 0;

	if (!conn->datadgst_en)
		return 0;

	tcp_ctask = ctask->dd_data;
	tcp_conn = conn->dd_data;

	if (!(tcp_ctask->xmstate & XMSTATE_W_RESEND_DATA_DIGEST)) {
		crypto_hash_final(&tcp_conn->tx_hash, (u8*)digest);
		iscsi_buf_init_iov(buf, (char*)digest, 4);
	}
	tcp_ctask->xmstate &= ~XMSTATE_W_RESEND_DATA_DIGEST;

	rc = iscsi_sendpage(conn, buf, &tcp_ctask->digest_count, &sent);
	if (!rc)
		debug_scsi("sent digest 0x%x for itt 0x%x\n", *digest,
			   ctask->itt);
	else {
		debug_scsi("sending digest 0x%x failed for itt 0x%x!\n",
			   *digest, ctask->itt);
		tcp_ctask->xmstate |= XMSTATE_W_RESEND_DATA_DIGEST;
	}
	return rc;
}
static int
iscsi_send_data(struct iscsi_cmd_task *ctask, struct iscsi_buf *sendbuf,
		struct scatterlist **sg, int *sent, int *count,
		struct iscsi_buf *digestbuf, uint32_t *digest)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_conn *conn = ctask->conn;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int rc, buf_sent, offset;

	while (*count) {
		buf_sent = 0;
		offset = sendbuf->sent;

		rc = iscsi_sendpage(conn, sendbuf, count, &buf_sent);
		*sent = *sent + buf_sent;
		if (buf_sent && conn->datadgst_en)
			partial_sg_digest_update(&tcp_conn->tx_hash,
				&sendbuf->sg, sendbuf->sg.offset + offset,
				buf_sent);
		if (!iscsi_buf_left(sendbuf) && *sg != tcp_ctask->bad_sg) {
			iscsi_buf_init_sg(sendbuf, *sg);
			*sg = *sg + 1;
		}

		if (rc)
			return rc;
	}

	rc = iscsi_send_padding(conn, ctask);
	if (rc)
		return rc;

	return iscsi_send_digest(conn, ctask, digestbuf, digest);
}
static int
iscsi_send_unsol_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_data_task *dtask;
	int rc;

	tcp_ctask->xmstate |= XMSTATE_UNS_DATA;
	if (tcp_ctask->xmstate & XMSTATE_UNS_INIT) {
		dtask = &tcp_ctask->unsol_dtask;

		iscsi_prep_unsolicit_data_pdu(ctask, &dtask->hdr);
		iscsi_buf_init_iov(&tcp_ctask->headbuf, (char*)&dtask->hdr,
				   sizeof(struct iscsi_hdr));
		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &tcp_ctask->headbuf,
					 (u8*)dtask->hdrext);

		tcp_ctask->xmstate &= ~XMSTATE_UNS_INIT;
		iscsi_set_padding(tcp_ctask, ctask->data_count);
	}

	rc = iscsi_sendhdr(conn, &tcp_ctask->headbuf, ctask->data_count);
	if (rc) {
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		tcp_ctask->xmstate |= XMSTATE_UNS_HDR;
		return rc;
	}

	if (conn->datadgst_en) {
		dtask = &tcp_ctask->unsol_dtask;
		iscsi_data_digest_init(ctask->conn->dd_data, tcp_ctask);
		dtask->digest = 0;
	}

	debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
		   ctask->itt, ctask->unsol_count, tcp_ctask->sent);
	return 0;
}
static int
iscsi_send_unsol_pdu(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc;

	if (tcp_ctask->xmstate & XMSTATE_UNS_HDR) {
		BUG_ON(!ctask->unsol_count);
		tcp_ctask->xmstate &= ~XMSTATE_UNS_HDR;
send_hdr:
		rc = iscsi_send_unsol_hdr(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_UNS_DATA) {
		struct iscsi_data_task *dtask = &tcp_ctask->unsol_dtask;
		int start = tcp_ctask->sent;

		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->data_count,
				     &dtask->digestbuf, &dtask->digest);
		ctask->unsol_count -= tcp_ctask->sent - start;
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_UNS_DATA;
		/*
		 * Done with the Data-Out. Next, check if we need
		 * to send another unsolicited Data-Out.
		 */
		if (ctask->unsol_count) {
			debug_scsi("sending more uns\n");
			tcp_ctask->xmstate |= XMSTATE_UNS_INIT;
			goto send_hdr;
		}
	}
	return 0;
}
static int iscsi_send_sol_pdu(struct iscsi_conn *conn,
			      struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_r2t_info *r2t;
	struct iscsi_data_task *dtask;
	int left, rc;

	if (tcp_ctask->xmstate & XMSTATE_SOL_HDR) {
		tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
		tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
		if (!tcp_ctask->r2t) {
			spin_lock_bh(&session->lock);
			__kfifo_get(tcp_ctask->r2tqueue, (void*)&tcp_ctask->r2t,
				    sizeof(void*));
			spin_unlock_bh(&session->lock);
		}
send_hdr:
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		if (conn->hdrdgst_en)
			iscsi_hdr_digest(conn, &r2t->headbuf,
					 (u8*)dtask->hdrext);
		rc = iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count);
		if (rc) {
			tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;
			tcp_ctask->xmstate |= XMSTATE_SOL_HDR;
			return rc;
		}

		if (conn->datadgst_en) {
			iscsi_data_digest_init(conn->dd_data, tcp_ctask);
			dtask->digest = 0;
		}

		iscsi_set_padding(tcp_ctask, r2t->data_count);
		debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
			   r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
			   r2t->sent);
	}

	if (tcp_ctask->xmstate & XMSTATE_SOL_DATA) {
		r2t = tcp_ctask->r2t;
		dtask = &r2t->dtask;

		rc = iscsi_send_data(ctask, &r2t->sendbuf, &r2t->sg,
				     &r2t->sent, &r2t->data_count,
				     &dtask->digestbuf, &dtask->digest);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_SOL_DATA;

		/*
		 * Done with this Data-Out. Next, check if we have
		 * to send another Data-Out for this R2T.
		 */
		BUG_ON(r2t->data_length - r2t->sent < 0);
		left = r2t->data_length - r2t->sent;
		if (left) {
			iscsi_solicit_data_cont(conn, ctask, r2t, left);
			tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
			tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
			goto send_hdr;
		}

		/*
		 * Done with this R2T. Check if there are more
		 * outstanding R2Ts ready to be processed.
		 */
		spin_lock_bh(&session->lock);
		tcp_ctask->r2t = NULL;
		__kfifo_put(tcp_ctask->r2tpool.queue, (void*)&r2t,
			    sizeof(void*));
		if (__kfifo_get(tcp_ctask->r2tqueue, (void*)&r2t,
				sizeof(void*))) {
			tcp_ctask->r2t = r2t;
			tcp_ctask->xmstate |= XMSTATE_SOL_DATA;
			tcp_ctask->xmstate &= ~XMSTATE_SOL_HDR;
			spin_unlock_bh(&session->lock);
			goto send_hdr;
		}
		spin_unlock_bh(&session->lock);
	}
	return 0;
}
static int
iscsi_tcp_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
{
	struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;
	int rc = 0;

	debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
		   conn->id, tcp_ctask->xmstate, ctask->itt);

	/*
	 * serialize with TMF AbortTask
	 */
	if (ctask->mtask)
		return rc;

	if (tcp_ctask->xmstate & XMSTATE_R_HDR)
		return iscsi_send_read_hdr(conn, tcp_ctask);

	if (tcp_ctask->xmstate & XMSTATE_W_HDR) {
		rc = iscsi_send_write_hdr(conn, ctask);
		if (rc)
			return rc;
	}

	if (tcp_ctask->xmstate & XMSTATE_IMM_DATA) {
		rc = iscsi_send_data(ctask, &tcp_ctask->sendbuf, &tcp_ctask->sg,
				     &tcp_ctask->sent, &ctask->imm_count,
				     &tcp_ctask->immbuf, &tcp_ctask->immdigest);
		if (rc)
			return rc;
		tcp_ctask->xmstate &= ~XMSTATE_IMM_DATA;
	}

	rc = iscsi_send_unsol_pdu(conn, ctask);
	if (rc)
		return rc;

	rc = iscsi_send_sol_pdu(conn, ctask);
	return rc;
}
*
1751 iscsi_tcp_conn_create(struct iscsi_cls_session
*cls_session
, uint32_t conn_idx
)
1753 struct iscsi_conn
*conn
;
1754 struct iscsi_cls_conn
*cls_conn
;
1755 struct iscsi_tcp_conn
*tcp_conn
;
1757 cls_conn
= iscsi_conn_setup(cls_session
, conn_idx
);
1760 conn
= cls_conn
->dd_data
;
1762 * due to strange issues with iser these are not set
1763 * in iscsi_conn_setup
1765 conn
->max_recv_dlength
= ISCSI_DEF_MAX_RECV_SEG_LEN
;
1767 tcp_conn
= kzalloc(sizeof(*tcp_conn
), GFP_KERNEL
);
1769 goto tcp_conn_alloc_fail
;
1771 conn
->dd_data
= tcp_conn
;
1772 tcp_conn
->iscsi_conn
= conn
;
1773 tcp_conn
->in_progress
= IN_PROGRESS_WAIT_HEADER
;
1774 /* initial operational parameters */
1775 tcp_conn
->hdr_size
= sizeof(struct iscsi_hdr
);
1777 tcp_conn
->tx_hash
.tfm
= crypto_alloc_hash("crc32c", 0,
1779 tcp_conn
->tx_hash
.flags
= 0;
1780 if (IS_ERR(tcp_conn
->tx_hash
.tfm
)) {
1781 printk(KERN_ERR
"Could not create connection due to crc32c "
1782 "loading error %ld. Make sure the crc32c module is "
1783 "built as a module or into the kernel\n",
1784 PTR_ERR(tcp_conn
->tx_hash
.tfm
));
1788 tcp_conn
->rx_hash
.tfm
= crypto_alloc_hash("crc32c", 0,
1790 tcp_conn
->rx_hash
.flags
= 0;
1791 if (IS_ERR(tcp_conn
->rx_hash
.tfm
)) {
1792 printk(KERN_ERR
"Could not create connection due to crc32c "
1793 "loading error %ld. Make sure the crc32c module is "
1794 "built as a module or into the kernel\n",
1795 PTR_ERR(tcp_conn
->rx_hash
.tfm
));
1802 crypto_free_hash(tcp_conn
->tx_hash
.tfm
);
1805 tcp_conn_alloc_fail
:
1806 iscsi_conn_teardown(cls_conn
);
static void
iscsi_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	if (!tcp_conn->sock)
		return;

	sock_hold(tcp_conn->sock->sk);
	iscsi_conn_restore_callbacks(tcp_conn);
	sock_put(tcp_conn->sock->sk);

	sock_release(tcp_conn->sock);
	tcp_conn->sock = NULL;
	conn->recv_lock = NULL;
}
static void
iscsi_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	iscsi_tcp_release_conn(conn);
	iscsi_conn_teardown(cls_conn);

	if (tcp_conn->tx_hash.tfm)
		crypto_free_hash(tcp_conn->tx_hash.tfm);
	if (tcp_conn->rx_hash.tfm)
		crypto_free_hash(tcp_conn->rx_hash.tfm);

	kfree(tcp_conn);
}
static void
iscsi_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	iscsi_conn_stop(cls_conn, flag);
	iscsi_tcp_release_conn(conn);
	tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
}
static int
iscsi_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		    struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		    int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return err;

	/* bind iSCSI connection and socket */
	tcp_conn->sock = sock;

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	/* FIXME: disable Nagle's algorithm */

	/*
	 * Intercept TCP callbacks for sendfile like receive
	 * processing.
	 */
	conn->recv_lock = &sk->sk_callback_lock;
	iscsi_conn_set_callbacks(conn);
	tcp_conn->sendpage = tcp_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	tcp_conn->in_progress = IN_PROGRESS_WAIT_HEADER;

	return 0;
}
/* called with host lock */
static void
iscsi_tcp_mgmt_init(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask,
		    char *data, uint32_t data_size)
{
	struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

	iscsi_buf_init_iov(&tcp_mtask->headbuf, (char*)mtask->hdr,
			   sizeof(struct iscsi_hdr));
	tcp_mtask->xmstate = XMSTATE_IMM_HDR;
	tcp_mtask->sent = 0;

	if (mtask->data_count)
		iscsi_buf_init_iov(&tcp_mtask->sendbuf, (char*)mtask->data,
				   mtask->data_count);
}
static int
iscsi_r2tpool_alloc(struct iscsi_session *session)
{
	int i;
	int cmd_i;

	/*
	 * initialize per-task: R2T pool and xmit queue
	 */
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		/*
		 * pre-allocated x4 as much r2ts to handle race when
		 * target acks DataOut faster than we data_xmit() queues
		 * could replenish r2tqueue.
		 */

		/* R2T pool */
		if (iscsi_pool_init(&tcp_ctask->r2tpool, session->max_r2t * 4,
				    (void***)&tcp_ctask->r2ts,
				    sizeof(struct iscsi_r2t_info))) {
			goto r2t_alloc_fail;
		}

		/* R2T xmit queue */
		tcp_ctask->r2tqueue = kfifo_alloc(
		      session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
		if (tcp_ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
			iscsi_pool_free(&tcp_ctask->r2tpool,
					(void**)tcp_ctask->r2ts);
			goto r2t_alloc_fail;
		}
	}

	return 0;

r2t_alloc_fail:
	for (i = 0; i < cmd_i; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool,
				(void**)tcp_ctask->r2ts);
	}
	return -ENOMEM;
}
static void
iscsi_r2tpool_free(struct iscsi_session *session)
{
	int i;

	for (i = 0; i < session->cmds_max; i++) {
		struct iscsi_cmd_task *ctask = session->cmds[i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		kfifo_free(tcp_ctask->r2tqueue);
		iscsi_pool_free(&tcp_ctask->r2tpool,
				(void**)tcp_ctask->r2ts);
	}
}
static int
iscsi_conn_set_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param,
		     char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_conn->hdr_size = sizeof(struct iscsi_hdr);
		if (conn->hdrdgst_en)
			tcp_conn->hdr_size += sizeof(__u32);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (session->max_r2t == roundup_pow_of_two(value))
			break;
		iscsi_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (session->max_r2t & (session->max_r2t - 1))
			session->max_r2t = roundup_pow_of_two(session->max_r2t);
		if (iscsi_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}
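/*
 * Example: setting max_r2t to 3 rounds it up to 4 and reallocates each
 * task's R2T pool at max_r2t * 4 = 16 entries (see iscsi_r2tpool_alloc).
 */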
static int
iscsi_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
			 enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct inet_sock *inet;
	struct ipv6_pinfo *np;
	struct sock *sk;
	int len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
		mutex_lock(&conn->xmitmutex);
		if (!tcp_conn->sock) {
			mutex_unlock(&conn->xmitmutex);
			return -EINVAL;
		}

		inet = inet_sk(tcp_conn->sock->sk);
		len = sprintf(buf, "%hu\n", be16_to_cpu(inet->dport));
		mutex_unlock(&conn->xmitmutex);
		break;
	case ISCSI_PARAM_CONN_ADDRESS:
		mutex_lock(&conn->xmitmutex);
		if (!tcp_conn->sock) {
			mutex_unlock(&conn->xmitmutex);
			return -EINVAL;
		}

		sk = tcp_conn->sock->sk;
		if (sk->sk_family == PF_INET) {
			inet = inet_sk(sk);
			len = sprintf(buf, NIPQUAD_FMT "\n",
				      NIPQUAD(inet->daddr));
		} else {
			np = inet6_sk(sk);
			len = sprintf(buf, NIP6_FMT "\n", NIP6(np->daddr));
		}
		mutex_unlock(&conn->xmitmutex);
		break;
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return len;
}
static void
iscsi_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;
}
static struct iscsi_cls_session *
iscsi_tcp_session_create(struct iscsi_transport *iscsit,
			 struct scsi_transport_template *scsit,
			 uint32_t initial_cmdsn, uint32_t *hostno)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	uint32_t hn;
	int cmd_i;

	cls_session = iscsi_session_setup(iscsit, scsit,
					  sizeof(struct iscsi_tcp_cmd_task),
					  sizeof(struct iscsi_tcp_mgmt_task),
					  initial_cmdsn, &hn);
	if (!cls_session)
		return NULL;
	*hostno = hn;

	session = class_to_transport_session(cls_session);
	for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
		struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
		struct iscsi_tcp_cmd_task *tcp_ctask = ctask->dd_data;

		ctask->hdr = &tcp_ctask->hdr;
	}

	for (cmd_i = 0; cmd_i < session->mgmtpool_max; cmd_i++) {
		struct iscsi_mgmt_task *mtask = session->mgmt_cmds[cmd_i];
		struct iscsi_tcp_mgmt_task *tcp_mtask = mtask->dd_data;

		mtask->hdr = &tcp_mtask->hdr;
	}

	if (iscsi_r2tpool_alloc(class_to_transport_session(cls_session)))
		goto r2tpool_alloc_fail;

	return cls_session;

r2tpool_alloc_fail:
	iscsi_session_teardown(cls_session);
	return NULL;
}

static void iscsi_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	iscsi_r2tpool_free(class_to_transport_session(cls_session));
	iscsi_session_teardown(cls_session);
}
static struct scsi_host_template iscsi_sht = {
	.name			= "iSCSI Initiator over TCP/IP",
	.queuecommand		= iscsi_queuecommand,
	.change_queue_depth	= iscsi_change_queue_depth,
	.can_queue		= ISCSI_XMIT_CMDS_MAX - 1,
	.sg_tablesize		= ISCSI_SG_TABLESIZE,
	.max_sectors		= 0xFFFF,
	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
	.eh_abort_handler	= iscsi_eh_abort,
	.eh_host_reset_handler	= iscsi_eh_host_reset,
	.use_clustering		= DISABLE_CLUSTERING,
	.proc_name		= "iscsi_tcp",
	.this_id		= -1,
};
static struct iscsi_transport iscsi_tcp_transport = {
	.owner			= THIS_MODULE,
	.name			= "tcp",
	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST
				  | CAP_DATADGST,
	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
				  ISCSI_MAX_XMIT_DLENGTH |
				  ISCSI_HDRDGST_EN |
				  ISCSI_DATADGST_EN |
				  ISCSI_INITIAL_R2T_EN |
				  ISCSI_MAX_R2T |
				  ISCSI_IMM_DATA_EN |
				  ISCSI_FIRST_BURST |
				  ISCSI_MAX_BURST |
				  ISCSI_PDU_INORDER_EN |
				  ISCSI_DATASEQ_INORDER_EN |
				  ISCSI_ERL |
				  ISCSI_CONN_PORT |
				  ISCSI_CONN_ADDRESS |
				  ISCSI_EXP_STATSN |
				  ISCSI_PERSISTENT_PORT |
				  ISCSI_PERSISTENT_ADDRESS |
				  ISCSI_TARGET_NAME |
				  ISCSI_TPGT,
	.host_template		= &iscsi_sht,
	.conndata_size		= sizeof(struct iscsi_conn),
	.max_conn		= 1,
	.max_cmd_len		= ISCSI_TCP_MAX_CMD_LEN,
	/* session management */
	.create_session		= iscsi_tcp_session_create,
	.destroy_session	= iscsi_tcp_session_destroy,
	/* connection management */
	.create_conn		= iscsi_tcp_conn_create,
	.bind_conn		= iscsi_tcp_conn_bind,
	.destroy_conn		= iscsi_tcp_conn_destroy,
	.set_param		= iscsi_conn_set_param,
	.get_conn_param		= iscsi_tcp_conn_get_param,
	.get_session_param	= iscsi_session_get_param,
	.start_conn		= iscsi_conn_start,
	.stop_conn		= iscsi_tcp_conn_stop,
	/* IO */
	.send_pdu		= iscsi_conn_send_pdu,
	.get_stats		= iscsi_conn_get_stats,
	.init_cmd_task		= iscsi_tcp_cmd_init,
	.init_mgmt_task		= iscsi_tcp_mgmt_init,
	.xmit_cmd_task		= iscsi_tcp_ctask_xmit,
	.xmit_mgmt_task		= iscsi_tcp_mtask_xmit,
	.cleanup_cmd_task	= iscsi_tcp_cleanup_ctask,
	/* recovery */
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
static int __init
iscsi_tcp_init(void)
{
	if (iscsi_max_lun < 1) {
		printk(KERN_ERR "iscsi_tcp: Invalid max_lun value of %u\n",
		       iscsi_max_lun);
		return -EINVAL;
	}
	iscsi_tcp_transport.max_lun = iscsi_max_lun;

	if (!iscsi_register_transport(&iscsi_tcp_transport))
		return -ENODEV;

	return 0;
}

static void __exit
iscsi_tcp_exit(void)
{
	iscsi_unregister_transport(&iscsi_tcp_transport);
}

module_init(iscsi_tcp_init);
module_exit(iscsi_tcp_exit);