[SCSI] cxgbi: convert to use iscsi_conn_get_addr_param
drivers/scsi/cxgbi/libcxgbi.c
/*
 * libcxgbi.c: Chelsio common library for T3/T4 iSCSI driver.
 *
 * Copyright (c) 2010 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 * Written by: Rakesh Ranjan (rranjan@chelsio.com)
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/if_vlan.h>
#include <linux/inet.h>
#include <net/dst.h>
#include <net/route.h>
#include <linux/inetdevice.h>	/* ip_dev_find() */
#include <net/tcp.h>

static unsigned int dbg_level;

#include "libcxgbi.h"

#define DRV_MODULE_NAME		"libcxgbi"
#define DRV_MODULE_DESC		"Chelsio iSCSI driver library"
#define DRV_MODULE_VERSION	"0.9.0"
#define DRV_MODULE_RELDATE	"Jun. 2010"

MODULE_AUTHOR("Chelsio Communications, Inc.");
MODULE_DESCRIPTION(DRV_MODULE_DESC);
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_LICENSE("GPL");

module_param(dbg_level, uint, 0644);
MODULE_PARM_DESC(dbg_level, "libcxgbi debug level (default=0)");
/*
 * cxgbi device management
 * maintains a list of the cxgbi devices
 */
static LIST_HEAD(cdev_list);
static DEFINE_MUTEX(cdev_mutex);
int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
				unsigned int max_conn)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
					     sizeof(struct cxgbi_sock *),
					     GFP_KERNEL);
	if (!pmap->port_csk) {
		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
		return -ENOMEM;
	}

	pmap->max_connect = max_conn;
	pmap->sport_base = base;
	spin_lock_init(&pmap->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_create);

void cxgbi_device_portmap_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	struct cxgbi_sock *csk;
	int i;

	for (i = 0; i < pmap->max_connect; i++) {
		if (pmap->port_csk[i]) {
			csk = pmap->port_csk[i];
			pmap->port_csk[i] = NULL;
			log_debug(1 << CXGBI_DBG_SOCK,
				"csk 0x%p, cdev 0x%p, offload down.\n",
				csk, cdev);
			spin_lock_bh(&csk->lock);
			cxgbi_sock_set_flag(csk, CTPF_OFFLOAD_DOWN);
			cxgbi_sock_closed(csk);
			spin_unlock_bh(&csk->lock);
			cxgbi_sock_put(csk);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_device_portmap_cleanup);
static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, cdev->nports);
	cxgbi_hbas_remove(cdev);
	cxgbi_device_portmap_cleanup(cdev);
	if (cdev->dev_ddp_cleanup)
		cdev->dev_ddp_cleanup(cdev);
	else
		cxgbi_ddp_cleanup(cdev);
	if (cdev->ddp)
		cxgbi_ddp_cleanup(cdev);
	if (cdev->pmap.max_connect)
		cxgbi_free_big_mem(cdev->pmap.port_csk);
	kfree(cdev);
}

struct cxgbi_device *cxgbi_device_register(unsigned int extra,
					   unsigned int nports)
{
	struct cxgbi_device *cdev;

	cdev = kzalloc(sizeof(*cdev) + extra + nports *
			(sizeof(struct cxgbi_hba *) +
			 sizeof(struct net_device *)),
			GFP_KERNEL);
	if (!cdev) {
		pr_warn("nport %d, OOM.\n", nports);
		return NULL;
	}
	cdev->ports = (struct net_device **)(cdev + 1);
	cdev->hbas = (struct cxgbi_hba **)(((char *)cdev->ports) + nports *
						sizeof(struct net_device *));
	if (extra)
		cdev->dd_data = ((char *)cdev->hbas) +
				nports * sizeof(struct cxgbi_hba *);
	spin_lock_init(&cdev->pmap.lock);

	mutex_lock(&cdev_mutex);
	list_add_tail(&cdev->list_head, &cdev_list);
	mutex_unlock(&cdev_mutex);

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u.\n", cdev, nports);
	return cdev;
}
EXPORT_SYMBOL_GPL(cxgbi_device_register);
void cxgbi_device_unregister(struct cxgbi_device *cdev)
{
	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p# %u,%s.\n",
		cdev, cdev->nports, cdev->nports ? cdev->ports[0]->name : "");
	mutex_lock(&cdev_mutex);
	list_del(&cdev->list_head);
	mutex_unlock(&cdev_mutex);
	cxgbi_device_destroy(cdev);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister);

void cxgbi_device_unregister_all(unsigned int flag)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if ((cdev->flags & flag) == flag) {
			log_debug(1 << CXGBI_DBG_DEV,
				"cdev 0x%p, p# %u,%s.\n",
				cdev, cdev->nports, cdev->nports ?
				cdev->ports[0]->name : "");
			list_del(&cdev->list_head);
			cxgbi_device_destroy(cdev);
		}
	}
	mutex_unlock(&cdev_mutex);
}
EXPORT_SYMBOL_GPL(cxgbi_device_unregister_all);

struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
{
	struct cxgbi_device *cdev, *tmp;

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		if (cdev->lldev == lldev) {
			mutex_unlock(&cdev_mutex);
			return cdev;
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"lldev 0x%p, NO match found.\n", lldev);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);

static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
							int *port)
{
	struct net_device *vdev = NULL;
	struct cxgbi_device *cdev, *tmp;
	int i;

	if (ndev->priv_flags & IFF_802_1Q_VLAN) {
		vdev = ndev;
		ndev = vlan_dev_real_dev(ndev);
		log_debug(1 << CXGBI_DBG_DEV,
			"vlan dev %s -> %s.\n", vdev->name, ndev->name);
	}

	mutex_lock(&cdev_mutex);
	list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
		for (i = 0; i < cdev->nports; i++) {
			if (ndev == cdev->ports[i]) {
				cdev->hbas[i]->vdev = vdev;
				mutex_unlock(&cdev_mutex);
				if (port)
					*port = i;
				return cdev;
			}
		}
	}
	mutex_unlock(&cdev_mutex);
	log_debug(1 << CXGBI_DBG_DEV,
		"ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
	return NULL;
}
void cxgbi_hbas_remove(struct cxgbi_device *cdev)
{
	int i;
	struct cxgbi_hba *chba;

	log_debug(1 << CXGBI_DBG_DEV,
		"cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		chba = cdev->hbas[i];
		if (chba) {
			cdev->hbas[i] = NULL;
			iscsi_host_remove(chba->shost);
			pci_dev_put(cdev->pdev);
			iscsi_host_free(chba->shost);
		}
	}
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, unsigned int max_lun,
		unsigned int max_id, struct scsi_host_template *sht,
		struct scsi_transport_template *stt)
{
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	int i, err;

	log_debug(1 << CXGBI_DBG_DEV, "cdev 0x%p, p#%u.\n", cdev, cdev->nports);

	for (i = 0; i < cdev->nports; i++) {
		shost = iscsi_host_alloc(sht, sizeof(*chba), 1);
		if (!shost) {
			pr_info("0x%p, p%d, %s, host alloc failed.\n",
				cdev, i, cdev->ports[i]->name);
			err = -ENOMEM;
			goto err_out;
		}

		shost->transportt = stt;
		shost->max_lun = max_lun;
		shost->max_id = max_id;
		shost->max_channel = 0;
		shost->max_cmd_len = 16;

		chba = iscsi_host_priv(shost);
		chba->cdev = cdev;
		chba->ndev = cdev->ports[i];
		chba->shost = shost;

		log_debug(1 << CXGBI_DBG_DEV,
			"cdev 0x%p, p#%d %s: chba 0x%p.\n",
			cdev, i, cdev->ports[i]->name, chba);

		pci_dev_get(cdev->pdev);
		err = iscsi_host_add(shost, &cdev->pdev->dev);
		if (err) {
			pr_info("cdev 0x%p, p#%d %s, host add failed.\n",
				cdev, i, cdev->ports[i]->name);
			pci_dev_put(cdev->pdev);
			scsi_host_put(shost);
			goto err_out;
		}

		cdev->hbas[i] = chba;
	}

	return 0;

err_out:
	cxgbi_hbas_remove(cdev);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
/*
 * iSCSI offload
 *
 * - source port management
 *   To find a free source port in the port allocation map we use a very simple
 *   rotor scheme to look for the next free port.
 *
 *   If a source port has been specified make sure that it doesn't collide with
 *   our normal source port allocation map.  If it's outside the range of our
 *   allocation/deallocation scheme just let them use it.
 *
 *   If the source port is outside our allocation range, the caller is
 *   responsible for keeping track of their port usage.
 */
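/*
 * Illustrative example (sport_base and max_connect values assumed, not
 * taken from h/w): with sport_base = 32768 and max_connect = 1024, map
 * slot 5 corresponds to source port 32768 + 5 = 32773.  pmap->next
 * remembers the last slot handed out, so the rotor below resumes the
 * search where the previous allocation left off.
 */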
static int sock_get_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;
	unsigned int start;
	int idx;

	if (!pmap->max_connect) {
		pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	if (csk->saddr.sin_port) {
		pr_err("source port NON-ZERO %u.\n",
			ntohs(csk->saddr.sin_port));
		return -EADDRINUSE;
	}

	spin_lock_bh(&pmap->lock);
	if (pmap->used >= pmap->max_connect) {
		spin_unlock_bh(&pmap->lock);
		pr_info("cdev 0x%p, p#%u %s, ALL ports used.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name);
		return -EADDRNOTAVAIL;
	}

	start = idx = pmap->next;
	do {
		if (++idx >= pmap->max_connect)
			idx = 0;
		if (!pmap->port_csk[idx]) {
			pmap->used++;
			csk->saddr.sin_port =
				htons(pmap->sport_base + idx);
			pmap->next = idx;
			pmap->port_csk[idx] = csk;
			spin_unlock_bh(&pmap->lock);
			cxgbi_sock_get(csk);
			log_debug(1 << CXGBI_DBG_SOCK,
				"cdev 0x%p, p#%u %s, p %u, %u.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx, pmap->next);
			return 0;
		}
	} while (idx != start);
	spin_unlock_bh(&pmap->lock);

	/* should not happen */
	pr_warn("cdev 0x%p, p#%u %s, next %u?\n",
		cdev, csk->port_id, cdev->ports[csk->port_id]->name,
		pmap->next);
	return -EADDRNOTAVAIL;
}
static void sock_put_port(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ports_map *pmap = &cdev->pmap;

	if (csk->saddr.sin_port) {
		int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;

		csk->saddr.sin_port = 0;
		if (idx < 0 || idx >= pmap->max_connect) {
			pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
				cdev, csk->port_id,
				cdev->ports[csk->port_id]->name,
				pmap->sport_base + idx);
			return;
		}

		spin_lock_bh(&pmap->lock);
		pmap->port_csk[idx] = NULL;
		pmap->used--;
		spin_unlock_bh(&pmap->lock);

		log_debug(1 << CXGBI_DBG_SOCK,
			"cdev 0x%p, p#%u %s, release %u.\n",
			cdev, csk->port_id, cdev->ports[csk->port_id]->name,
			pmap->sport_base + idx);

		cxgbi_sock_put(csk);
	}
}
/*
 * iscsi tcp connection
 */
void cxgbi_sock_free_cpl_skbs(struct cxgbi_sock *csk)
{
	if (csk->cpl_close) {
		kfree_skb(csk->cpl_close);
		csk->cpl_close = NULL;
	}
	if (csk->cpl_abort_req) {
		kfree_skb(csk->cpl_abort_req);
		csk->cpl_abort_req = NULL;
	}
	if (csk->cpl_abort_rpl) {
		kfree_skb(csk->cpl_abort_rpl);
		csk->cpl_abort_rpl = NULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_sock_free_cpl_skbs);

static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev)
{
	struct cxgbi_sock *csk = kzalloc(sizeof(*csk), GFP_NOIO);

	if (!csk) {
		pr_info("alloc csk %zu failed.\n", sizeof(*csk));
		return NULL;
	}

	if (cdev->csk_alloc_cpls(csk) < 0) {
		pr_info("csk 0x%p, alloc cpls failed.\n", csk);
		kfree(csk);
		return NULL;
	}

	spin_lock_init(&csk->lock);
	kref_init(&csk->refcnt);
	skb_queue_head_init(&csk->receive_queue);
	skb_queue_head_init(&csk->write_queue);
	setup_timer(&csk->retry_timer, NULL, (unsigned long)csk);
	rwlock_init(&csk->callback_lock);
	csk->cdev = cdev;
	csk->flags = 0;
	cxgbi_sock_set_state(csk, CTP_CLOSED);

	log_debug(1 << CXGBI_DBG_SOCK, "cdev 0x%p, new csk 0x%p.\n", cdev, csk);

	return csk;
}
static struct rtable *find_route_ipv4(__be32 saddr, __be32 daddr,
				      __be16 sport, __be16 dport, u8 tos)
{
	struct rtable *rt;
	struct flowi fl = {
		.oif = 0,
		.nl_u = {
			.ip4_u = {
				.daddr = daddr,
				.saddr = saddr,
				.tos = tos }
			},
		.proto = IPPROTO_TCP,
		.uli_u = {
			.ports = {
				.sport = sport,
				.dport = dport }
			}
	};

	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
		return NULL;

	return rt;
}
static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
{
	struct sockaddr_in *daddr = (struct sockaddr_in *)dst_addr;
	struct dst_entry *dst;
	struct net_device *ndev;
	struct cxgbi_device *cdev;
	struct rtable *rt = NULL;
	struct cxgbi_sock *csk = NULL;
	unsigned int mtu = 0;
	int port = 0xFFFF;
	int err = 0;

	if (daddr->sin_family != AF_INET) {
		pr_info("address family 0x%x NOT supported.\n",
			daddr->sin_family);
		err = -EAFNOSUPPORT;
		goto err_out;
	}

	rt = find_route_ipv4(0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
	if (!rt) {
		pr_info("no route to ipv4 0x%x, port %u.\n",
			daddr->sin_addr.s_addr, daddr->sin_port);
		err = -ENETUNREACH;
		goto err_out;
	}
	dst = &rt->dst;
	ndev = dst->neighbour->dev;

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		pr_info("multi-cast route %pI4, port %u, dev %s.\n",
			&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
			ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}

	if (ndev->flags & IFF_LOOPBACK) {
		ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
		mtu = ndev->mtu;
		pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
			dst->neighbour->dev->name, ndev->name, mtu);
	}

	cdev = cxgbi_device_find_by_netdev(ndev, &port);
	if (!cdev) {
		pr_info("dst %pI4, %s, NOT cxgbi device.\n",
			&daddr->sin_addr.s_addr, ndev->name);
		err = -ENETUNREACH;
		goto rel_rt;
	}
	log_debug(1 << CXGBI_DBG_SOCK,
		"route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n",
		&daddr->sin_addr.s_addr, ntohs(daddr->sin_port),
		port, ndev->name, cdev);

	csk = cxgbi_sock_create(cdev);
	if (!csk) {
		err = -ENOMEM;
		goto rel_rt;
	}
	csk->cdev = cdev;
	csk->port_id = port;
	csk->mtu = mtu;
	csk->dst = dst;
	csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
	csk->daddr.sin_port = daddr->sin_port;
	csk->daddr.sin_family = daddr->sin_family;
	csk->saddr.sin_addr.s_addr = rt->rt_src;

	return csk;

rel_rt:
	ip_rt_put(rt);
	if (csk)
		cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
			unsigned int opt)
{
	csk->write_seq = csk->snd_nxt = csk->snd_una = snd_isn;
	dst_confirm(csk->dst);
	smp_mb();
	cxgbi_sock_set_state(csk, CTP_ESTABLISHED);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_established);

static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK,
		"csk 0x%p, state %u, flags 0x%lx, conn 0x%p.\n",
		csk, csk->state, csk->flags, csk->user_data);

	if (csk->state != CTP_ESTABLISHED) {
		read_lock_bh(&csk->callback_lock);
		if (csk->user_data)
			iscsi_conn_failure(csk->user_data,
					ISCSI_ERR_CONN_FAILED);
		read_unlock_bh(&csk->callback_lock);
	}
}

void cxgbi_sock_closed(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	if (csk->state == CTP_ACTIVE_OPEN || csk->state == CTP_CLOSED)
		return;
	if (csk->saddr.sin_port)
		sock_put_port(csk);
	if (csk->dst)
		dst_release(csk->dst);
	csk->cdev->csk_release_offload_resources(csk);
	cxgbi_sock_set_state(csk, CTP_CLOSED);
	cxgbi_inform_iscsi_conn_closing(csk);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_closed);
static void need_active_close(struct cxgbi_sock *csk)
{
	int data_lost;
	int close_req = 0;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	spin_lock_bh(&csk->lock);
	dst_confirm(csk->dst);
	data_lost = skb_queue_len(&csk->receive_queue);
	__skb_queue_purge(&csk->receive_queue);

	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_set_flag(csk, CTPF_ACTIVE_CLOSE_NEEDED);
	else if (csk->state == CTP_ESTABLISHED) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_ACTIVE_CLOSE);
	} else if (csk->state == CTP_PASSIVE_CLOSE) {
		close_req = 1;
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
	}

	if (close_req) {
		if (data_lost)
			csk->cdev->csk_send_abort_req(csk);
		else
			csk->cdev->csk_send_close_req(csk);
	}

	spin_unlock_bh(&csk->lock);
}

void cxgbi_sock_fail_act_open(struct cxgbi_sock *csk, int errno)
{
	pr_info("csk 0x%p,%u,%lx, %pI4:%u-%pI4:%u, err %d.\n",
		csk, csk->state, csk->flags,
		&csk->saddr.sin_addr.s_addr, csk->saddr.sin_port,
		&csk->daddr.sin_addr.s_addr, csk->daddr.sin_port,
		errno);

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	csk->err = errno;
	cxgbi_sock_closed(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_fail_act_open);
void cxgbi_sock_act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct cxgbi_sock *csk = (struct cxgbi_sock *)skb->sk;

	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (csk->state == CTP_ACTIVE_OPEN)
		cxgbi_sock_fail_act_open(csk, -EHOSTUNREACH);
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
	__kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_act_open_req_arp_failure);

void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
{
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
		if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD))
			cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
		else {
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD);
			cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
			if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
				pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
					csk, csk->state, csk->flags, csk->tid);
			cxgbi_sock_closed(csk);
		}
	}
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_abort_rpl);

void cxgbi_sock_rcv_peer_close(struct cxgbi_sock *csk)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ESTABLISHED:
		cxgbi_sock_set_state(csk, CTP_PASSIVE_CLOSE);
		break;
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_2);
		break;
	case CTP_CLOSE_WAIT_1:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
	cxgbi_inform_iscsi_conn_closing(csk);
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_peer_close);
void cxgbi_sock_rcv_close_conn_rpl(struct cxgbi_sock *csk, u32 snd_nxt)
{
	log_debug(1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n",
		csk, (csk)->state, (csk)->flags, (csk)->tid);
	cxgbi_sock_get(csk);
	spin_lock_bh(&csk->lock);

	csk->snd_una = snd_nxt - 1;
	if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING))
		goto done;

	switch (csk->state) {
	case CTP_ACTIVE_CLOSE:
		cxgbi_sock_set_state(csk, CTP_CLOSE_WAIT_1);
		break;
	case CTP_CLOSE_WAIT_1:
	case CTP_CLOSE_WAIT_2:
		cxgbi_sock_closed(csk);
		break;
	case CTP_ABORTING:
		break;
	default:
		pr_err("csk 0x%p,%u,0x%lx,%u, bad state.\n",
			csk, csk->state, csk->flags, csk->tid);
	}
done:
	spin_unlock_bh(&csk->lock);
	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_close_conn_rpl);
void cxgbi_sock_rcv_wr_ack(struct cxgbi_sock *csk, unsigned int credits,
			   unsigned int snd_una, int seq_chk)
{
	log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
		"csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, snd_una %u,%d.\n",
		csk, csk->state, csk->flags, csk->tid, credits,
		csk->wr_cred, csk->wr_una_cred, snd_una, seq_chk);

	spin_lock_bh(&csk->lock);

	csk->wr_cred += credits;
	if (csk->wr_una_cred > csk->wr_max_cred - csk->wr_cred)
		csk->wr_una_cred = csk->wr_max_cred - csk->wr_cred;

	while (credits) {
		struct sk_buff *p = cxgbi_sock_peek_wr(csk);

		if (unlikely(!p)) {
			pr_err("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, empty.\n",
				csk, csk->state, csk->flags, csk->tid, credits,
				csk->wr_cred, csk->wr_una_cred);
			break;
		}

		if (unlikely(credits < p->csum)) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, cr %u,%u+%u, < %u.\n",
				csk, csk->state, csk->flags, csk->tid,
				credits, csk->wr_cred, csk->wr_una_cred,
				p->csum);
			p->csum -= credits;
			break;
		} else {
			cxgbi_sock_dequeue_wr(csk);
			credits -= p->csum;
			kfree_skb(p);
		}
	}

	cxgbi_sock_check_wr_invariants(csk);

	if (seq_chk) {
		if (unlikely(before(snd_una, csk->snd_una))) {
			pr_warn("csk 0x%p,%u,0x%lx,%u, snd_una %u/%u.\n",
				csk, csk->state, csk->flags, csk->tid, snd_una,
				csk->snd_una);
			goto done;
		}

		if (csk->snd_una != snd_una) {
			csk->snd_una = snd_una;
			dst_confirm(csk->dst);
		}
	}

	if (skb_queue_len(&csk->write_queue)) {
		if (csk->cdev->csk_push_tx_frames(csk, 0))
			cxgbi_conn_tx_open(csk);
	} else
		cxgbi_conn_tx_open(csk);
done:
	spin_unlock_bh(&csk->lock);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_rcv_wr_ack);
static unsigned int cxgbi_sock_find_best_mtu(struct cxgbi_sock *csk,
					     unsigned short mtu)
{
	int i = 0;

	while (i < csk->cdev->nmtus - 1 && csk->cdev->mtus[i + 1] <= mtu)
		++i;

	return i;
}

unsigned int cxgbi_sock_select_mss(struct cxgbi_sock *csk, unsigned int pmtu)
{
	unsigned int idx;
	struct dst_entry *dst = csk->dst;

	csk->advmss = dst_metric_advmss(dst);

	if (csk->advmss > pmtu - 40)
		csk->advmss = pmtu - 40;
	if (csk->advmss < csk->cdev->mtus[0] - 40)
		csk->advmss = csk->cdev->mtus[0] - 40;
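	/*
	 * The 40 bytes subtracted above are the minimal IPv4 (20-byte) +
	 * TCP (20-byte) header overhead, i.e. MSS = MTU - 40; e.g. a path
	 * MTU of 1500 caps advmss at 1460 bytes.
	 */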
	idx = cxgbi_sock_find_best_mtu(csk, csk->advmss + 40);

	return idx;
}
EXPORT_SYMBOL_GPL(cxgbi_sock_select_mss);
void cxgbi_sock_skb_entail(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	cxgbi_skcb_tcp_seq(skb) = csk->write_seq;
	__skb_queue_tail(&csk->write_queue, skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_skb_entail);

void cxgbi_sock_purge_wr_queue(struct cxgbi_sock *csk)
{
	struct sk_buff *skb;

	while ((skb = cxgbi_sock_dequeue_wr(csk)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_purge_wr_queue);

void cxgbi_sock_check_wr_invariants(const struct cxgbi_sock *csk)
{
	int pending = cxgbi_sock_count_pending_wrs(csk);

	if (unlikely(csk->wr_cred + pending != csk->wr_max_cred))
		pr_err("csk 0x%p, tid %u, credit %u + %u != %u.\n",
			csk, csk->tid, csk->wr_cred, pending, csk->wr_max_cred);
}
EXPORT_SYMBOL_GPL(cxgbi_sock_check_wr_invariants);
static int cxgbi_sock_send_pdus(struct cxgbi_sock *csk, struct sk_buff *skb)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct sk_buff *next;
	int err, copied = 0;

	spin_lock_bh(&csk->lock);

	if (csk->state != CTP_ESTABLISHED) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EAGAIN.\n",
			csk, csk->state, csk->flags, csk->tid);
		err = -EAGAIN;
		goto out_err;
	}

	if (csk->err) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, EPIPE %d.\n",
			csk, csk->state, csk->flags, csk->tid, csk->err);
		err = -EPIPE;
		goto out_err;
	}

	if (csk->write_seq - csk->snd_una >= cdev->snd_win) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"csk 0x%p,%u,0x%lx,%u, FULL %u-%u >= %u.\n",
			csk, csk->state, csk->flags, csk->tid, csk->write_seq,
			csk->snd_una, cdev->snd_win);
		err = -ENOBUFS;
		goto out_err;
	}

	while (skb) {
		int frags = skb_shinfo(skb)->nr_frags +
				(skb->len != skb->data_len);

		if (unlikely(skb_headroom(skb) < cdev->skb_tx_rsvd)) {
			pr_err("csk 0x%p, skb head %u < %u.\n",
				csk, skb_headroom(skb), cdev->skb_tx_rsvd);
			err = -EINVAL;
			goto out_err;
		}

		if (frags >= SKB_WR_LIST_SIZE) {
			pr_err("csk 0x%p, frags %d, %u,%u >%u.\n",
				csk, skb_shinfo(skb)->nr_frags, skb->len,
				skb->data_len, (uint)(SKB_WR_LIST_SIZE));
			err = -EINVAL;
			goto out_err;
		}

		next = skb->next;
		skb->next = NULL;
		cxgbi_skcb_set_flag(skb, SKCBF_TX_NEED_HDR);
		cxgbi_sock_skb_entail(csk, skb);
		copied += skb->len;
		csk->write_seq += skb->len +
				cxgbi_ulp_extra_len(cxgbi_skcb_ulp_mode(skb));
		skb = next;
	}
done:
	if (likely(skb_queue_len(&csk->write_queue)))
		cdev->csk_push_tx_frames(csk, 1);
	spin_unlock_bh(&csk->lock);
	return copied;

out_err:
	if (copied == 0 && err == -EPIPE)
		copied = csk->err ? csk->err : -EPIPE;
	else
		copied = err;
	goto done;
}
/*
 * Direct Data Placement -
 * Directly place the iSCSI Data-In or Data-Out PDU's payload into pre-posted
 * final destination host-memory buffers based on the Initiator Task Tag (ITT)
 * in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 * The location of the pagepod entry is encoded into ddp tag which is used as
 * the base for ITT/TTT.
 */
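/*
 * Illustrative tag layout (index value assumed for the example): with a
 * pagepod index of 7, a ddp tag is formed as
 * tag = sw_tag_bits | (7 << PPOD_IDX_SHIFT), and the index is recovered
 * with (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask, as ddp_tag_release()
 * below does.
 */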
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
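/* page shifts 12/13/14/16 select 4KB, 8KB, 16KB and 64KB ddp pages */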
static unsigned char page_idx = DDP_PGIDX_MAX;

static unsigned char sw_tag_idx_bits;
static unsigned char sw_tag_age_bits;
/*
 * Direct-Data Placement page size adjustment
 */
static int ddp_adjust_page_table(void)
{
	int i;
	unsigned int base_order, order;

	if (PAGE_SIZE < (1UL << ddp_page_shift[0])) {
		pr_info("PAGE_SIZE 0x%lx too small, min 0x%lx\n",
			PAGE_SIZE, 1UL << ddp_page_shift[0]);
		return -EINVAL;
	}

	base_order = get_order(1UL << ddp_page_shift[0]);
	order = get_order(1UL << PAGE_SHIFT);

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		/* first is the kernel page size, then just doubling */
		ddp_page_order[i] = order - base_order + i;
		ddp_page_shift[i] = PAGE_SHIFT + i;
	}
	return 0;
}

static int ddp_find_page_index(unsigned long pgsz)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++) {
		if (pgsz == (1UL << ddp_page_shift[i]))
			return i;
	}
	pr_info("ddp page size %lu not supported.\n", pgsz);
	return DDP_PGIDX_MAX;
}

static void ddp_setup_host_page_size(void)
{
	if (page_idx == DDP_PGIDX_MAX) {
		page_idx = ddp_find_page_index(PAGE_SIZE);

		if (page_idx == DDP_PGIDX_MAX) {
			pr_info("system PAGE %lu, update hw.\n", PAGE_SIZE);
			if (ddp_adjust_page_table() < 0) {
				pr_info("PAGE %lu, disable ddp.\n", PAGE_SIZE);
				return;
			}
			page_idx = ddp_find_page_index(PAGE_SIZE);
		}
		pr_info("system PAGE %lu, ddp idx %u.\n", PAGE_SIZE, page_idx);
	}
}

void cxgbi_ddp_page_size_factor(int *pgsz_factor)
{
	int i;

	for (i = 0; i < DDP_PGIDX_MAX; i++)
		pgsz_factor[i] = ddp_page_order[i];
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_page_size_factor);
/*
 * DDP setup & teardown
 */

void cxgbi_ddp_ppod_set(struct cxgbi_pagepod *ppod,
			struct cxgbi_pagepod_hdr *hdr,
			struct cxgbi_gather_list *gl, unsigned int gidx)
{
	int i;

	memcpy(ppod, hdr, sizeof(*hdr));
	for (i = 0; i < (PPOD_PAGES_MAX + 1); i++, gidx++) {
		ppod->addr[i] = gidx < gl->nelem ?
			cpu_to_be64(gl->phys_addr[gidx]) : 0ULL;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_set);

void cxgbi_ddp_ppod_clear(struct cxgbi_pagepod *ppod)
{
	memset(ppod, 0, sizeof(*ppod));
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_ppod_clear);
static inline int ddp_find_unused_entries(struct cxgbi_ddp_info *ddp,
					  unsigned int start, unsigned int max,
					  unsigned int count,
					  struct cxgbi_gather_list *gl)
{
	unsigned int i, j, k;

	/* not enough entries */
	if ((max - start) < count) {
		log_debug(1 << CXGBI_DBG_DDP,
			"NOT enough entries %u+%u < %u.\n", start, count, max);
		return -EBUSY;
	}

	max -= count;
	spin_lock(&ddp->map_lock);
	for (i = start; i < max;) {
		for (j = 0, k = i; j < count; j++, k++) {
			if (ddp->gl_map[k])
				break;
		}
		if (j == count) {
			for (j = 0, k = i; j < count; j++, k++)
				ddp->gl_map[k] = gl;
			spin_unlock(&ddp->map_lock);
			return i;
		}
		i += j + 1;
	}
	spin_unlock(&ddp->map_lock);
	log_debug(1 << CXGBI_DBG_DDP,
		"NO suitable entries %u available.\n", count);
	return -EBUSY;
}

static inline void ddp_unmark_entries(struct cxgbi_ddp_info *ddp,
				      int start, int count)
{
	spin_lock(&ddp->map_lock);
	memset(&ddp->gl_map[start], 0,
		count * sizeof(struct cxgbi_gather_list *));
	spin_unlock(&ddp->map_lock);
}

static inline void ddp_gl_unmap(struct pci_dev *pdev,
				struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++)
		dma_unmap_page(&pdev->dev, gl->phys_addr[i], PAGE_SIZE,
				PCI_DMA_FROMDEVICE);
}

static inline int ddp_gl_map(struct pci_dev *pdev,
			     struct cxgbi_gather_list *gl)
{
	int i;

	for (i = 0; i < gl->nelem; i++) {
		gl->phys_addr[i] = dma_map_page(&pdev->dev, gl->pages[i], 0,
						PAGE_SIZE,
						PCI_DMA_FROMDEVICE);
		if (unlikely(dma_mapping_error(&pdev->dev, gl->phys_addr[i]))) {
			log_debug(1 << CXGBI_DBG_DDP,
				"page %d 0x%p, 0x%p dma mapping err.\n",
				i, gl->pages[i], pdev);
			goto unmap;
		}
	}
	return i;

unmap:
	if (i) {
		unsigned int nelem = gl->nelem;

		gl->nelem = i;
		ddp_gl_unmap(pdev, gl);
		gl->nelem = nelem;
	}
	return -EINVAL;
}

static void ddp_release_gl(struct cxgbi_gather_list *gl,
			   struct pci_dev *pdev)
{
	ddp_gl_unmap(pdev, gl);
	kfree(gl);
}
static struct cxgbi_gather_list *ddp_make_gl(unsigned int xferlen,
					     struct scatterlist *sgl,
					     unsigned int sgcnt,
					     struct pci_dev *pdev,
					     gfp_t gfp)
{
	struct cxgbi_gather_list *gl;
	struct scatterlist *sg = sgl;
	struct page *sgpage = sg_page(sg);
	unsigned int sglen = sg->length;
	unsigned int sgoffset = sg->offset;
	unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
				PAGE_SHIFT;
	int i = 1, j = 0;

	if (xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u < threshold %u, no ddp.\n",
			xferlen, DDP_THRESHOLD);
		return NULL;
	}

	gl = kzalloc(sizeof(struct cxgbi_gather_list) +
		     npages * (sizeof(dma_addr_t) +
		     sizeof(struct page *)), gfp);
	if (!gl) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xfer %u, %u pages, OOM.\n", xferlen, npages);
		return NULL;
	}

	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, sgl %u, gl max %u.\n", xferlen, sgcnt, npages);

	gl->pages = (struct page **)&gl->phys_addr[npages];
	gl->nelem = npages;
	gl->length = xferlen;
	gl->offset = sgoffset;
	gl->pages[0] = sgpage;

	for (i = 1, sg = sg_next(sgl), j = 0; i < sgcnt;
	     i++, sg = sg_next(sg)) {
		struct page *page = sg_page(sg);

		if (sgpage == page && sg->offset == sgoffset + sglen)
			sglen += sg->length;
		else {
			/* make sure the sgl is fit for ddp:
			 * each has the same page size, and
			 * all of the middle pages are used completely
			 */
			if ((j && sgoffset) || ((i != sgcnt - 1) &&
			    ((sglen + sgoffset) & ~PAGE_MASK))) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, %u + %u.\n",
					i, sgcnt, sgoffset, sglen);
				goto error_out;
			}

			j++;
			if (j == gl->nelem || sg->offset) {
				log_debug(1 << CXGBI_DBG_DDP,
					"page %d/%u, offset %u.\n",
					j, gl->nelem, sg->offset);
				goto error_out;
			}
			gl->pages[j] = page;
			sglen = sg->length;
			sgoffset = sg->offset;
			sgpage = page;
		}
	}
	gl->nelem = ++j;

	if (ddp_gl_map(pdev, gl) < 0)
		goto error_out;

	return gl;

error_out:
	kfree(gl);
	return NULL;
}
static void ddp_tag_release(struct cxgbi_hba *chba, u32 tag)
{
	struct cxgbi_device *cdev = chba->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	u32 idx;

	idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
	if (idx < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[idx];
		unsigned int npods;

		if (!gl || !gl->nelem) {
			pr_warn("tag 0x%x, idx %u, gl 0x%p, %u.\n",
				tag, idx, gl, gl ? gl->nelem : 0);
			return;
		}
		npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
		log_debug(1 << CXGBI_DBG_DDP,
			"tag 0x%x, release idx %u, npods %u.\n",
			tag, idx, npods);
		cdev->csk_ddp_clear(chba, tag, idx, npods);
		ddp_unmark_entries(ddp, idx, npods);
		ddp_release_gl(gl, ddp->pdev);
	} else
		pr_warn("tag 0x%x, idx %u > max %u.\n", tag, idx, ddp->nppods);
}

static int ddp_tag_reserve(struct cxgbi_sock *csk, unsigned int tid,
			   u32 sw_tag, u32 *tagp, struct cxgbi_gather_list *gl,
			   gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_ddp_info *ddp = cdev->ddp;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_pagepod_hdr hdr;
	unsigned int npods;
	int idx = -1;
	int err = -ENOMEM;
	u32 tag;

	npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
	if (ddp->idx_last == ddp->nppods)
		idx = ddp_find_unused_entries(ddp, 0, ddp->nppods,
					      npods, gl);
	else {
		idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
					      ddp->nppods, npods,
					      gl);
		if (idx < 0 && ddp->idx_last >= npods) {
			idx = ddp_find_unused_entries(ddp, 0,
				min(ddp->idx_last + npods, ddp->nppods),
				npods, gl);
		}
	}
	if (idx < 0) {
		log_debug(1 << CXGBI_DBG_DDP,
			"xferlen %u, gl %u, npods %u NO DDP.\n",
			gl->length, gl->nelem, npods);
		return idx;
	}

	tag = cxgbi_ddp_tag_base(tformat, sw_tag);
	tag |= idx << PPOD_IDX_SHIFT;

	hdr.rsvd = 0;
	hdr.vld_tid = htonl(PPOD_VALID_FLAG | PPOD_TID(tid));
	hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
	hdr.max_offset = htonl(gl->length);
	hdr.page_offset = htonl(gl->offset);

	err = cdev->csk_ddp_set(csk, &hdr, idx, npods, gl);
	if (err < 0)
		goto unmark_entries;

	ddp->idx_last = idx;
	log_debug(1 << CXGBI_DBG_DDP,
		"xfer %u, gl %u,%u, tid 0x%x, tag 0x%x->0x%x(%u,%u).\n",
		gl->length, gl->nelem, gl->offset, tid, sw_tag, tag, idx,
		npods);
	*tagp = tag;
	return 0;

unmark_entries:
	ddp_unmark_entries(ddp, idx, npods);
	return err;
}
int cxgbi_ddp_reserve(struct cxgbi_sock *csk, unsigned int *tagp,
		      unsigned int sw_tag, unsigned int xferlen,
		      struct scatterlist *sgl, unsigned int sgcnt, gfp_t gfp)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct cxgbi_tag_format *tformat = &cdev->tag_format;
	struct cxgbi_gather_list *gl;
	int err;

	if (page_idx >= DDP_PGIDX_MAX || !cdev->ddp ||
	    xferlen < DDP_THRESHOLD) {
		log_debug(1 << CXGBI_DBG_DDP,
			"pgidx %u, xfer %u, NO ddp.\n", page_idx, xferlen);
		return -EINVAL;
	}

	if (!cxgbi_sw_tag_usable(tformat, sw_tag)) {
		log_debug(1 << CXGBI_DBG_DDP,
			"sw_tag 0x%x NOT usable.\n", sw_tag);
		return -EINVAL;
	}

	gl = ddp_make_gl(xferlen, sgl, sgcnt, cdev->pdev, gfp);
	if (!gl)
		return -ENOMEM;

	err = ddp_tag_reserve(csk, csk->tid, sw_tag, tagp, gl, gfp);
	if (err < 0)
		ddp_release_gl(gl, cdev->pdev);

	return err;
}
static void ddp_destroy(struct kref *kref)
{
	struct cxgbi_ddp_info *ddp = container_of(kref,
						  struct cxgbi_ddp_info,
						  refcnt);
	struct cxgbi_device *cdev = ddp->cdev;
	int i = 0;

	pr_info("kref 0, destroy ddp 0x%p, cdev 0x%p.\n", ddp, cdev);

	while (i < ddp->nppods) {
		struct cxgbi_gather_list *gl = ddp->gl_map[i];

		if (gl) {
			int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
					>> PPOD_PAGES_SHIFT;
			pr_info("cdev 0x%p, ddp %d + %d.\n", cdev, i, npods);
			kfree(gl);
			i += npods;
		} else
			i++;
	}
	cxgbi_free_big_mem(ddp);
}

int cxgbi_ddp_cleanup(struct cxgbi_device *cdev)
{
	struct cxgbi_ddp_info *ddp = cdev->ddp;

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release ddp 0x%p.\n", cdev, ddp);
	cdev->ddp = NULL;
	if (ddp)
		return kref_put(&ddp->refcnt, ddp_destroy);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_cleanup);
int cxgbi_ddp_init(struct cxgbi_device *cdev,
		   unsigned int llimit, unsigned int ulimit,
		   unsigned int max_txsz, unsigned int max_rxsz)
{
	struct cxgbi_ddp_info *ddp;
	unsigned int ppmax, bits;

	ppmax = (ulimit - llimit + 1) >> PPOD_SIZE_SHIFT;
	bits = __ilog2_u32(ppmax) + 1;
	if (bits > PPOD_IDX_MAX_SIZE)
		bits = PPOD_IDX_MAX_SIZE;
	ppmax = (1 << (bits - 1)) - 1;

	ddp = cxgbi_alloc_big_mem(sizeof(struct cxgbi_ddp_info) +
				ppmax * (sizeof(struct cxgbi_gather_list *) +
					 sizeof(struct sk_buff *)),
				GFP_KERNEL);
	if (!ddp) {
		pr_warn("cdev 0x%p, ddp ppmax %u OOM.\n", cdev, ppmax);
		return -ENOMEM;
	}
	ddp->gl_map = (struct cxgbi_gather_list **)(ddp + 1);
	cdev->ddp = ddp;

	spin_lock_init(&ddp->map_lock);
	kref_init(&ddp->refcnt);

	ddp->cdev = cdev;
	ddp->pdev = cdev->pdev;
	ddp->llimit = llimit;
	ddp->ulimit = ulimit;
	ddp->max_txsz = min_t(unsigned int, max_txsz, ULP2_MAX_PKT_SIZE);
	ddp->max_rxsz = min_t(unsigned int, max_rxsz, ULP2_MAX_PKT_SIZE);
	ddp->nppods = ppmax;
	ddp->idx_last = ppmax;
	ddp->idx_bits = bits;
	ddp->idx_mask = (1 << bits) - 1;
	ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

	cdev->tag_format.sw_bits = sw_tag_idx_bits + sw_tag_age_bits;
	cdev->tag_format.rsvd_bits = ddp->idx_bits;
	cdev->tag_format.rsvd_shift = PPOD_IDX_SHIFT;
	cdev->tag_format.rsvd_mask = (1 << cdev->tag_format.rsvd_bits) - 1;
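	/*
	 * Worked example (bits and shift values assumed for illustration):
	 * with bits = 12 and PPOD_IDX_SHIFT = 5, idx_mask = 0xfff and
	 * rsvd_tag_mask = 0x1ffff, i.e. tag bits [16:5] carry the pagepod
	 * index while the remaining bits are left for the sw tag
	 * (task index + session age).
	 */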
	pr_info("%s tag format, sw %u, rsvd %u,%u, mask 0x%x.\n",
		cdev->ports[0]->name, cdev->tag_format.sw_bits,
		cdev->tag_format.rsvd_bits, cdev->tag_format.rsvd_shift,
		cdev->tag_format.rsvd_mask);

	cdev->tx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  ddp->max_txsz - ISCSI_PDU_NONPAYLOAD_LEN);
	cdev->rx_max_size = min_t(unsigned int, ULP2_MAX_PDU_PAYLOAD,
				  ddp->max_rxsz - ISCSI_PDU_NONPAYLOAD_LEN);

	log_debug(1 << CXGBI_DBG_DDP,
		"%s max payload size: %u/%u, %u/%u.\n",
		cdev->ports[0]->name, cdev->tx_max_size, ddp->max_txsz,
		cdev->rx_max_size, ddp->max_rxsz);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_ddp_init);
/*
 * APIs interacting with open-iscsi libraries
 */

static unsigned char padding[4];

static void task_release_itt(struct iscsi_task *task, itt_t hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 tag = ntohl((__force u32)hdr_itt);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, release tag 0x%x.\n", chba->cdev, tag);
	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE) &&
	    cxgbi_is_ddp_tag(tformat, tag))
		ddp_tag_release(chba, tag);
}
static int task_reserve_itt(struct iscsi_task *task, itt_t *hdr_itt)
{
	struct scsi_cmnd *sc = task->sc;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_session *sess = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_hba *chba = cconn->chba;
	struct cxgbi_tag_format *tformat = &chba->cdev->tag_format;
	u32 sw_tag = (sess->age << cconn->task_idx_bits) | task->itt;
	u32 tag = 0;
	int err = -EINVAL;

	if (sc &&
	    (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_FROM_DEVICE)) {
		err = cxgbi_ddp_reserve(cconn->cep->csk, &tag, sw_tag,
					scsi_in(sc)->length,
					scsi_in(sc)->table.sgl,
					scsi_in(sc)->table.nents,
					GFP_ATOMIC);
		if (err < 0)
			log_debug(1 << CXGBI_DBG_DDP,
				"csk 0x%p, R task 0x%p, %u,%u, no ddp.\n",
				cconn->cep->csk, task, scsi_in(sc)->length,
				scsi_in(sc)->table.nents);
	}

	if (err < 0)
		tag = cxgbi_set_non_ddp_tag(tformat, sw_tag);
	/* the itt needs to be sent in big-endian order */
	*hdr_itt = (__force itt_t)htonl(tag);

	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, task 0x%p, 0x%x(0x%x,0x%x)->0x%x/0x%x.\n",
		chba->cdev, task, sw_tag, task->itt, sess->age, tag, *hdr_itt);
	return 0;
}
void cxgbi_parse_pdu_itt(struct iscsi_conn *conn, itt_t itt, int *idx, int *age)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	u32 tag = ntohl((__force u32)itt);
	u32 sw_bits;

	sw_bits = cxgbi_tag_nonrsvd_bits(&cdev->tag_format, tag);
	if (idx)
		*idx = sw_bits & ((1 << cconn->task_idx_bits) - 1);
	if (age)
		*age = (sw_bits >> cconn->task_idx_bits) & ISCSI_AGE_MASK;
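	/*
	 * Illustrative decomposition (task_idx_bits = 12 assumed): for
	 * sw_bits = 0x3005, idx = 0x3005 & 0xfff = 0x5 and the remaining
	 * high bits (0x3) are the session age, mirroring the
	 * (age << task_idx_bits) | itt packing in task_reserve_itt().
	 */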
	log_debug(1 << CXGBI_DBG_DDP,
		"cdev 0x%p, tag 0x%x/0x%x, -> 0x%x(0x%x,0x%x).\n",
		cdev, tag, itt, sw_bits, idx ? *idx : 0xFFFFF,
		age ? *age : 0xFF);
}
EXPORT_SYMBOL_GPL(cxgbi_parse_pdu_itt);
void cxgbi_conn_tx_open(struct cxgbi_sock *csk)
{
	struct iscsi_conn *conn = csk->user_data;

	if (conn) {
		log_debug(1 << CXGBI_DBG_SOCK,
			"csk 0x%p, cid %d.\n", csk, conn->id);
		iscsi_conn_queue_work(conn);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_tx_open);
/*
 * pdu receive, interact with libiscsi_tcp
 */
static inline int read_pdu_skb(struct iscsi_conn *conn,
			       struct sk_buff *skb,
			       unsigned int offset,
			       int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		pr_info("skb 0x%p, off %u, %d, TCP_ERR.\n",
			skb, offset, offloaded);
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SUSPEND, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		pr_info("skb 0x%p, off %u, %d, TCP_SKB_DONE.\n",
			skb, offset, offloaded);
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, off %u, %d, TCP_SEG_DONE, rc %d.\n",
			skb, offset, offloaded, bytes_read);
		return bytes_read;
	default:
		pr_info("skb 0x%p, off %u, %d, invalid status %d.\n",
			skb, offset, offloaded, status);
		return -EINVAL;
	}
}
static int skb_read_pdu_bhs(struct iscsi_conn *conn, struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		pr_info("conn 0x%p, skb 0x%p, not hdr.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en &&
	    cxgbi_skcb_test_flag(skb, SKCBF_RX_HCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, hcrc.\n", conn, skb);
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	return read_pdu_skb(conn, skb, 0, 0);
}

static int skb_read_pdu_data(struct iscsi_conn *conn, struct sk_buff *lskb,
			     struct sk_buff *skb, unsigned int offset)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = false;
	int opcode = tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"conn 0x%p, skb 0x%p, len %u, flag 0x%lx.\n",
		conn, skb, skb->len, cxgbi_skcb_flags(skb));

	if (conn->datadgst_en &&
	    cxgbi_skcb_test_flag(lskb, SKCBF_RX_DCRC_ERR)) {
		pr_info("conn 0x%p, skb 0x%p, dcrc 0x%lx.\n",
			conn, lskb, cxgbi_skcb_flags(lskb));
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	/* coalesced, add header digest length */
	if (lskb == skb && conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	if (cxgbi_skcb_test_flag(lskb, SKCBF_RX_DATA_DDPD))
		offloaded = true;

	if (opcode == ISCSI_OP_SCSI_DATA_IN)
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"skb 0x%p, op 0x%x, itt 0x%x, %u %s ddp'ed.\n",
			skb, opcode, ntohl(tcp_conn->in.hdr->itt),
			tcp_conn->in.datalen, offloaded ? "is" : "not");

	return read_pdu_skb(conn, skb, offset, offloaded);
}
static void csk_return_rx_credits(struct cxgbi_sock *csk, int copied)
{
	struct cxgbi_device *cdev = csk->cdev;
	int must_send;
	u32 credits;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
		csk->rcv_wup, cdev->rx_credit_thres,
		cdev->rcv_win);

	if (csk->state != CTP_ESTABLISHED)
		return;

	credits = csk->copied_seq - csk->rcv_wup;
	if (unlikely(!credits))
		return;
	if (unlikely(cdev->rx_credit_thres == 0))
		return;
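	/*
	 * Credit-return heuristic: send an update unconditionally once
	 * less than 16KB of the receive window remains un-returned;
	 * otherwise batch updates until at least rx_credit_thres bytes
	 * have been consumed.
	 */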
	must_send = credits + 16384 >= cdev->rcv_win;
	if (must_send || credits >= cdev->rx_credit_thres)
		csk->rcv_wup += cdev->csk_send_rx_credits(csk, credits);
}
void cxgbi_conn_pdu_ready(struct cxgbi_sock *csk)
{
	struct cxgbi_device *cdev = csk->cdev;
	struct iscsi_conn *conn = csk->user_data;
	struct sk_buff *skb;
	unsigned int read = 0;
	int err = 0;

	log_debug(1 << CXGBI_DBG_PDU_RX,
		"csk 0x%p, conn 0x%p.\n", csk, conn);

	if (unlikely(!conn || conn->suspend_rx)) {
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, conn 0x%p, id %d, suspend_rx %lu!\n",
			csk, conn, conn ? conn->id : 0xFF,
			conn ? conn->suspend_rx : 0xFF);
		return;
	}

	while (!err) {
		skb = skb_peek(&csk->receive_queue);
		if (!skb ||
		    !(cxgbi_skcb_test_flag(skb, SKCBF_RX_STATUS))) {
			if (skb)
				log_debug(1 << CXGBI_DBG_PDU_RX,
					"skb 0x%p, NOT ready 0x%lx.\n",
					skb, cxgbi_skcb_flags(skb));
			break;
		}
		__skb_unlink(skb, &csk->receive_queue);

		read += cxgbi_skcb_rx_pdulen(skb);
		log_debug(1 << CXGBI_DBG_PDU_RX,
			"csk 0x%p, skb 0x%p,%u,f 0x%lx, pdu len %u.\n",
			csk, skb, skb->len, cxgbi_skcb_flags(skb),
			cxgbi_skcb_rx_pdulen(skb));

		if (cxgbi_skcb_test_flag(skb, SKCBF_RX_COALESCED)) {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("coalesced bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}
			err = skb_read_pdu_data(conn, skb, skb,
						err + cdev->skb_rx_extra);
			if (err < 0)
				pr_err("coalesced data, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
		} else {
			err = skb_read_pdu_bhs(conn, skb);
			if (err < 0) {
				pr_err("bhs, csk 0x%p, skb 0x%p,%u, "
					"f 0x%lx, plen %u.\n",
					csk, skb, skb->len,
					cxgbi_skcb_flags(skb),
					cxgbi_skcb_rx_pdulen(skb));
				goto skb_done;
			}

			if (cxgbi_skcb_test_flag(skb, SKCBF_RX_DATA)) {
				struct sk_buff *dskb;

				dskb = skb_peek(&csk->receive_queue);
				if (!dskb) {
					pr_err("csk 0x%p, skb 0x%p,%u, f 0x%lx,"
						" plen %u, NO data.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb));
					err = -EIO;
					goto skb_done;
				}
				__skb_unlink(dskb, &csk->receive_queue);

				err = skb_read_pdu_data(conn, skb, dskb, 0);
				if (err < 0)
					pr_err("data, csk 0x%p, skb 0x%p,%u, "
						"f 0x%lx, plen %u, dskb 0x%p,"
						"%u.\n",
						csk, skb, skb->len,
						cxgbi_skcb_flags(skb),
						cxgbi_skcb_rx_pdulen(skb),
						dskb, dskb->len);
				__kfree_skb(dskb);
			} else
				err = skb_read_pdu_data(conn, skb, skb, 0);
		}
skb_done:
		__kfree_skb(skb);

		if (err < 0)
			break;
	}

	log_debug(1 << CXGBI_DBG_PDU_RX, "csk 0x%p, read %u.\n", csk, read);
	if (read) {
		csk->copied_seq += read;
		csk_return_rx_credits(csk, read);
		conn->rxdata_octets += read;
	}

	if (err < 0) {
		pr_info("csk 0x%p, 0x%p, rx failed %d, read %u.\n",
			csk, conn, err, read);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_conn_pdu_ready);
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
			   unsigned int offset, unsigned int *off,
			   struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}

static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				pr_warn("sg %d NULL, len %u/%u.\n",
					i, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}

		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				pr_warn("too many pages %u, dlen %u.\n",
					frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}
int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_ISCSI_PDU_HEADER_MAX;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) &&
	    (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	     (opcode == ISCSI_OP_SCSI_CMD &&
	      (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		/* data could go into the skb head */
		headroom += min_t(unsigned int,
				SKB_MAX_HEAD(cdev->skb_tx_rsvd),
				conn->max_xmit_dlength);

	tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
	if (!tdata->skb) {
		pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
			cdev->skb_tx_rsvd, headroom, opcode);
		return -ENOMEM;
	}

	skb_reserve(tdata->skb, cdev->skb_tx_rsvd);
	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		task_reserve_itt(task, &task->hdr->itt);

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p, op 0x%x, skb 0x%p,%u+%u/%u, itt 0x%x.\n",
		task, opcode, tdata->skb, cdev->skb_tx_rsvd, headroom,
		conn->max_xmit_dlength, ntohl(task->hdr->itt));

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_alloc_pdu);
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	if (hcrc || dcrc) {
		u8 submode = 0;

		if (hcrc)
			submode |= 1;
		if (dcrc)
			submode |= 2;
		cxgbi_skcb_ulp_mode(skb) = (ULP2_MODE_ISCSI << 4) | submode;
	} else
		cxgbi_skcb_ulp_mode(skb) = 0;
}
int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"task 0x%p,0x%p, skb 0x%p, 0x%x,0x%x,0x%x, %u+%u.\n",
		task, task->sc, skb, (*skb->data) & ISCSI_OPCODE_MASK,
		ntohl(task->cmdsn), ntohl(task->hdr->itt), offset, count);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(
			sdb->table.sgl, sdb->table.nents,
			tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u/%u.\n",
				sdb->table.nents, tdata->offset, sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			pr_warn("tpdu, sgl %u, bad offset %u + %u.\n",
				sdb->table.nents, tdata->offset, tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fits into the frag_list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
				sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				virt_to_page(padding), offset_in_page(padding),
				padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_init_pdu);
int cxgbi_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen;
	int err;

	if (!skb) {
		log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb NULL.\n", task);
		return 0;
	}

	datalen = skb->data_len;
	tdata->skb = NULL;
	err = cxgbi_sock_send_pdus(cconn->cep->csk, skb);
	if (err > 0) {
		int pdulen = err;

		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p,0x%p, skb 0x%p, len %u/%u, rv %d.\n",
			task, task->sc, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		log_debug(1 << CXGBI_DBG_PDU_TX,
			"task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n",
			task, skb, skb->len, skb->data_len, err);
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX,
		"itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
		task->itt, skb, skb->len, skb->data_len, err);
	kfree_skb(skb);
	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_conn_xmit_pdu);
void cxgbi_cleanup_task(struct iscsi_task *task)
{
	struct cxgbi_task_data *tdata = iscsi_task_cxgbi_data(task);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"task 0x%p, skb 0x%p, itt 0x%x.\n",
		task, tdata->skb, task->hdr_itt);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(*tdata));

	task_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
EXPORT_SYMBOL_GPL(cxgbi_cleanup_task);
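
/* copy the libiscsi connection counters into the iscsi_stats reply */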
void cxgbi_get_conn_stats(struct iscsi_cls_conn *cls_conn,
				struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;

	stats->txdata_octets = conn->txdata_octets;
	stats->rxdata_octets = conn->rxdata_octets;
	stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
	stats->dataout_pdus = conn->dataout_pdus_cnt;
	stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
	stats->datain_pdus = conn->datain_pdus_cnt;
	stats->r2t_pdus = conn->r2t_pdus_cnt;
	stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
	stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
	stats->digest_err = 0;
	stats->timeout_err = 0;
	stats->custom_length = 1;
	strcpy(stats->custom[0].desc, "eh_abort_cnt");
	stats->custom[0].value = conn->eh_abort_cnt;
}
EXPORT_SYMBOL_GPL(cxgbi_get_conn_stats);
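
/*
 * Pick MaxXmitDataSegmentLength: the larger of the skb headroom and
 * 512 bytes per page frag, clamped to the adapter's tx_max_size and
 * rounded to PDU alignment.
 */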
static int cxgbi_conn_max_xmit_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_device *cdev = cconn->chba->cdev;
	unsigned int headroom = SKB_MAX_HEAD(cdev->skb_tx_rsvd);
	unsigned int max_def = 512 * MAX_SKB_FRAGS;
	unsigned int max = max(max_def, headroom);

	max = min(cdev->tx_max_size, max);
	if (conn->max_xmit_dlength)
		conn->max_xmit_dlength = min(conn->max_xmit_dlength, max);
	else
		conn->max_xmit_dlength = max;
	cxgbi_align_pdu_size(conn->max_xmit_dlength);

	return 0;
}
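
/*
 * Validate MaxRecvDataSegmentLength against the adapter's rx_max_size;
 * a value above the hardware limit is rejected with -EINVAL rather than
 * silently clamped.
 */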
static int cxgbi_conn_max_recv_dlength(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	unsigned int max = cconn->chba->cdev->rx_max_size;

	cxgbi_align_pdu_size(max);

	if (conn->max_recv_dlength) {
		if (conn->max_recv_dlength > max) {
			pr_err("MaxRecvDataSegmentLength %u > %u.\n",
				conn->max_recv_dlength, max);
			return -EINVAL;
		}
		conn->max_recv_dlength = min(conn->max_recv_dlength, max);
		cxgbi_align_pdu_size(conn->max_recv_dlength);
	} else
		conn->max_recv_dlength = max;

	return 0;
}
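
/*
 * cxgbi_set_conn_param - intercept the parameters that need hardware
 * programming (digest offload) or recalculation (PDU size limits, R2T
 * pool sizing) and pass everything else straight to iscsi_set_param().
 */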
int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
			enum iscsi_param param, char *buf, int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct cxgbi_sock *csk = cconn->cep->csk;
	int value, err = 0;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls_conn 0x%p, param %d, buf(%d) %s.\n",
		cls_conn, param, buflen, buf);

	switch (param) {
	case ISCSI_PARAM_HDRDGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->hdrdgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && conn->datadgst_en)
			err = csk->cdev->csk_ddp_setup_digest(csk, csk->tid,
							conn->hdrdgst_en,
							conn->datadgst_en, 0);
		break;
	case ISCSI_PARAM_MAX_R2T:
		if (sscanf(buf, "%d", &value) != 1 ||
		    value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_tcp_r2tpool_free(session);
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err && iscsi_tcp_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	case ISCSI_PARAM_MAX_RECV_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_recv_dlength(conn);
		break;
	case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		err = iscsi_set_param(cls_conn, param, buf, buflen);
		if (!err)
			err = cxgbi_conn_max_xmit_dlength(conn);
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}
	return err;
}
EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
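
/*
 * cxgbi_get_ep_param - report the peer address/port of the offloaded
 * connection through iscsi_conn_get_addr_param(); csk->daddr holds the
 * destination address in sockaddr form.
 */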
int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
			char *buf)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, param %d.\n", ep, param);

	switch (param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		if (!cep)
			return -ENOTCONN;

		csk = cep->csk;
		if (!csk)
			return -ENOTCONN;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						&csk->daddr, param, buf);
	default:
		return -ENOSYS;
	}
}
EXPORT_SYMBOL_GPL(cxgbi_get_ep_param);

struct iscsi_cls_conn *
cxgbi_create_conn(struct iscsi_cls_session *cls_session, u32 cid)
{
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct cxgbi_conn *cconn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*cconn), cid);
	if (!cls_conn)
		return NULL;

	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	cconn = tcp_conn->dd_data;
	cconn->iconn = conn;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cid %u(0x%x), cls 0x%p,0x%p, conn 0x%p,0x%p,0x%p.\n",
		cid, cid, cls_session, cls_conn, conn, tcp_conn, cconn);

	return cls_conn;
}
EXPORT_SYMBOL_GPL(cxgbi_create_conn);
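
/*
 * cxgbi_bind_conn - tie an iscsi connection to an offload endpoint:
 * program the DDP page size on the hardware tid, size the per-connection
 * tag-index bits from cmds_max, and publish the csk<->conn linkage under
 * the callback lock before priming the recv engine.
 */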
int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
			struct iscsi_cls_conn *cls_conn,
			u64 transport_eph, int is_leading)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct cxgbi_conn *cconn = tcp_conn->dd_data;
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_sock *csk;
	int err;

	ep = iscsi_lookup_endpoint(transport_eph);
	if (!ep)
		return -EINVAL;

	/* setup ddp pagesize */
	cep = ep->dd_data;
	csk = cep->csk;
	err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid, page_idx, 0);
	if (err < 0)
		return err;

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		return -EINVAL;

	/* calculate the tag idx bits needed for this conn based on cmds_max */
	cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

	write_lock_bh(&csk->callback_lock);
	csk->user_data = conn;
	cconn->chba = cep->chba;
	cconn->cep = cep;
	cep->cconn = cconn;
	write_unlock_bh(&csk->callback_lock);

	cxgbi_conn_max_xmit_dlength(conn);
	cxgbi_conn_max_recv_dlength(conn);

	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls 0x%p,0x%p, ep 0x%p, cconn 0x%p, csk 0x%p.\n",
		cls_session, cls_conn, ep, cconn, csk);

	/* init recv engine */
	iscsi_tcp_hdr_recv_prep(tcp_conn);

	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);
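
/*
 * cxgbi_create_session - session setup on top of an already-connected
 * endpoint; per-task dd_data is sized for both the iscsi_tcp task data
 * and the cxgbi task data that iscsi_task_cxgbi_data() locates behind it.
 */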
struct iscsi_cls_session *cxgbi_create_session(struct iscsi_endpoint *ep,
						u16 cmds_max, u16 qdepth,
						u32 initial_cmdsn)
{
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *chba;
	struct Scsi_Host *shost;
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;

	if (!ep) {
		pr_err("missing endpoint.\n");
		return NULL;
	}

	cep = ep->dd_data;
	chba = cep->chba;
	shost = chba->shost;

	BUG_ON(chba != iscsi_host_priv(shost));

	cls_session = iscsi_session_setup(chba->cdev->itp, shost,
					cmds_max, 0,
					sizeof(struct iscsi_tcp_task) +
					sizeof(struct cxgbi_task_data),
					initial_cmdsn, ISCSI_MAX_TARGET);
	if (!cls_session)
		return NULL;

	session = cls_session->dd_data;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;

	log_debug(1 << CXGBI_DBG_ISCSI,
		"ep 0x%p, cls sess 0x%p.\n", ep, cls_session);
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
	return NULL;
}
EXPORT_SYMBOL_GPL(cxgbi_create_session);

void cxgbi_destroy_session(struct iscsi_cls_session *cls_session)
{
	log_debug(1 << CXGBI_DBG_ISCSI,
		"cls sess 0x%p.\n", cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);
}
EXPORT_SYMBOL_GPL(cxgbi_destroy_session);
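
/*
 * Host-level parameters: only the IPv4 source address is consumed here;
 * HW address and netdev name are read-only, and anything else falls back
 * to iscsi_host_set_param().
 */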
int cxgbi_set_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf, int buflen)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not set host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d, buf(%d) %s.\n",
		shost, chba, chba->ndev->name, param, buflen, buf);

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr = in_aton(buf);

		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, req. ipv4 %pI4.\n", chba->ndev->name, &addr);
		cxgbi_set_iscsi_ipv4(chba, addr);
		return 0;
	}
	case ISCSI_HOST_PARAM_HWADDRESS:
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		return 0;
	default:
		return iscsi_host_set_param(shost, param, buf, buflen);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_set_host_param);

int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
			char *buf)
{
	struct cxgbi_hba *chba = iscsi_host_priv(shost);
	int len = 0;

	if (!chba->ndev) {
		shost_printk(KERN_ERR, shost, "Could not get host param. "
				"netdev for host not set.\n");
		return -ENODEV;
	}

	log_debug(1 << CXGBI_DBG_ISCSI,
		"shost 0x%p, hba 0x%p,%s, param %d.\n",
		shost, chba, chba->ndev->name, param);

	switch (param) {
	case ISCSI_HOST_PARAM_HWADDRESS:
		len = sysfs_format_mac(buf, chba->ndev->dev_addr, 6);
		break;
	case ISCSI_HOST_PARAM_NETDEV_NAME:
		len = sprintf(buf, "%s\n", chba->ndev->name);
		break;
	case ISCSI_HOST_PARAM_IPADDRESS:
	{
		__be32 addr;

		addr = cxgbi_get_iscsi_ipv4(chba);
		len = sprintf(buf, "%pI4", &addr);
		log_debug(1 << CXGBI_DBG_ISCSI,
			"hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr);
		break;
	}
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return len;
}
EXPORT_SYMBOL_GPL(cxgbi_get_host_param);
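
/*
 * cxgbi_ep_connect - resolve the route to dst_addr, take a reference on
 * the offload socket, reserve a source port and start the active open.
 * If a specific shost was requested, the route must land on that host's
 * port or the connect is refused.
 */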
struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
					struct sockaddr *dst_addr,
					int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct cxgbi_endpoint *cep;
	struct cxgbi_hba *hba = NULL;
	struct cxgbi_sock *csk;
	int err = -EINVAL;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"shost 0x%p, non_blocking %d, dst_addr 0x%p.\n",
		shost, non_blocking, dst_addr);

	if (shost) {
		hba = iscsi_host_priv(shost);
		if (!hba) {
			pr_info("shost 0x%p, priv NULL.\n", shost);
			goto err_out;
		}
	}

	csk = cxgbi_check_route(dst_addr);
	if (IS_ERR(csk))
		return (struct iscsi_endpoint *)csk;
	cxgbi_sock_get(csk);

	if (!hba)
		hba = csk->cdev->hbas[csk->port_id];
	else if (hba != csk->cdev->hbas[csk->port_id]) {
		pr_info("Could not connect through requested host %u, "
			"hba 0x%p != 0x%p (%u).\n",
			shost->host_no, hba,
			csk->cdev->hbas[csk->port_id], csk->port_id);
		err = -ENOSPC;
		goto release_conn;
	}

	err = sock_get_port(csk);
	if (err)
		goto release_conn;

	cxgbi_sock_set_state(csk, CTP_CONNECTING);
	err = csk->cdev->csk_init_act_open(csk);
	if (err)
		goto release_conn;

	if (cxgbi_sock_is_closing(csk)) {
		err = -ENOSPC;
		pr_info("csk 0x%p is closing.\n", csk);
		goto release_conn;
	}

	ep = iscsi_create_endpoint(sizeof(*cep));
	if (!ep) {
		err = -ENOMEM;
		pr_info("iscsi alloc ep, OOM.\n");
		goto release_conn;
	}

	cep = ep->dd_data;
	cep->csk = csk;
	cep->chba = hba;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, csk 0x%p, hba 0x%p,%s.\n",
		ep, cep, csk, hba, hba->ndev->name);
	return ep;

release_conn:
	cxgbi_sock_put(csk);
	cxgbi_sock_closed(csk);
err_out:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_connect);

int cxgbi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_sock *csk = cep->csk;

	if (!cxgbi_sock_is_established(csk))
		return 0;
	return 1;
}
EXPORT_SYMBOL_GPL(cxgbi_ep_poll);
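
/*
 * cxgbi_ep_disconnect - unhook the csk from the iscsi connection under
 * the callback lock, then close it: an active close once the TCP state
 * has reached ESTABLISHED, otherwise an immediate teardown.
 */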
void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct cxgbi_endpoint *cep = ep->dd_data;
	struct cxgbi_conn *cconn = cep->cconn;
	struct cxgbi_sock *csk = cep->csk;

	log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_SOCK,
		"ep 0x%p, cep 0x%p, cconn 0x%p, csk 0x%p,%u,0x%lx.\n",
		ep, cep, cconn, csk, csk->state, csk->flags);

	if (cconn && cconn->iconn) {
		iscsi_suspend_tx(cconn->iconn);
		write_lock_bh(&csk->callback_lock);
		cep->csk->user_data = NULL;
		cconn->cep = NULL;
		write_unlock_bh(&csk->callback_lock);
	}
	iscsi_destroy_endpoint(ep);

	if (likely(csk->state >= CTP_ESTABLISHED))
		need_active_close(csk);
	else
		cxgbi_sock_closed(csk);

	cxgbi_sock_put(csk);
}
EXPORT_SYMBOL_GPL(cxgbi_ep_disconnect);
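
/*
 * Register the driver's iscsi_transport template with the iscsi class;
 * cxgbi_iscsi_cleanup() below is the matching de-registration.
 */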
int cxgbi_iscsi_init(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	*stt = iscsi_register_transport(itp);
	if (*stt == NULL) {
		pr_err("unable to register %s transport 0x%p.\n",
			itp->name, itp);
		return -ENODEV;
	}
	log_debug(1 << CXGBI_DBG_ISCSI,
		"%s, registered iscsi transport 0x%p.\n",
		itp->name, *stt);
	return 0;
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_init);

void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
			struct scsi_transport_template **stt)
{
	if (*stt) {
		log_debug(1 << CXGBI_DBG_ISCSI,
			"de-register transport 0x%p, %s, stt 0x%p.\n",
			itp, itp->name, *stt);
		*stt = NULL;
		iscsi_unregister_transport(itp);
	}
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);

static int __init libcxgbi_init_module(void)
{
	sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1;
	sw_tag_age_bits = (__ilog2_u32(ISCSI_AGE_MASK)) + 1;

	pr_info("tag itt 0x%x, %u bits, age 0x%x, %u bits.\n",
		ISCSI_ITT_MASK, sw_tag_idx_bits,
		ISCSI_AGE_MASK, sw_tag_age_bits);

	ddp_setup_host_page_size();
	return 0;
}

static void __exit libcxgbi_exit_module(void)
{
	cxgbi_device_unregister_all(0xFF);
}

module_init(libcxgbi_init_module);
module_exit(libcxgbi_exit_module);