/*
 * cxgb3i_ddp.c: Chelsio S3xx iSCSI DDP Manager.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

#include "cxgb3_ctl_defs.h"
#include "cxgb3_offload.h"
#include "firmware_exports.h"

#include "cxgb3i_ddp.h"
#define DRV_MODULE_NAME         "cxgb3i_ddp"
#define DRV_MODULE_VERSION      "1.0.0"
#define DRV_MODULE_RELDATE      "Dec. 1, 2008"
static char version[] =
        "Chelsio S3xx iSCSI DDP " DRV_MODULE_NAME
        " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Karen Xie <kxie@chelsio.com>");
MODULE_DESCRIPTION("cxgb3i ddp pagepod manager");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define ddp_log_error(fmt...)   printk(KERN_ERR "cxgb3i_ddp: ERR! " fmt)
#define ddp_log_warn(fmt...)    printk(KERN_WARNING "cxgb3i_ddp: WARN! " fmt)
#define ddp_log_info(fmt...)    printk(KERN_INFO "cxgb3i_ddp: " fmt)
#ifdef __DEBUG_CXGB3I_DDP__
#define ddp_log_debug(fmt, args...) \
        printk(KERN_INFO "cxgb3i_ddp: %s - " fmt, __func__ , ## args)
#else
#define ddp_log_debug(fmt...)
#endif
/*
 * iSCSI Direct Data Placement
 *
 * T3 h/w can directly place the iSCSI Data-In or Data-Out PDU's payload into
 * pre-posted final destination host-memory buffers based on the Initiator
 * Task Tag (ITT) in Data-In or Target Task Tag (TTT) in Data-Out PDUs.
 *
 * The host memory address is programmed into h/w in the format of pagepod
 * entries.
 *
 * The location of the pagepod entry is encoded into the ddp tag, which is
 * used as, or as the base for, the ITT/TTT.
 */
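/*
 * Note on the tag layout (a sketch based on cxgb3i_ddp_tag_reserve() and
 * ddp_init() below): the pagepod index chosen for a transfer is placed into
 * the tag at PPOD_IDX_SHIFT and covered by ddp->rsvd_tag_mask, while the
 * caller-supplied s/w tag bits are folded around it by cxgb3i_ddp_tag_base().
 * The h/w can then recover the pagepod entry, and hence the host buffer,
 * directly from the ITT/TTT it sees on the wire.
 */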
#define DDP_PGIDX_MAX           4
#define DDP_THRESHOLD           2048
static unsigned char ddp_page_order[DDP_PGIDX_MAX] = {0, 1, 2, 4};
static unsigned char ddp_page_shift[DDP_PGIDX_MAX] = {12, 13, 14, 16};
static unsigned char page_idx = DDP_PGIDX_MAX;
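/*
 * ddp_page_order[]/ddp_page_shift[] describe the four h/w-supported ddp page
 * sizes: 4KB, 8KB, 16KB and 64KB.  page_idx caches the index matching the
 * host PAGE_SIZE; it is resolved in cxgb3i_ddp_init_module() and stays at
 * DDP_PGIDX_MAX if the host page size is not supported.
 */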
static LIST_HEAD(cxgb3i_ddp_list);
static DEFINE_RWLOCK(cxgb3i_ddp_rwlock);
/*
 * functions to program the pagepod in h/w
 */
static inline void ulp_mem_io_set_hdr(struct sk_buff *skb, unsigned int addr)
{
        struct ulp_mem_io *req = (struct ulp_mem_io *)skb->head;

        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS));
        req->cmd_lock_addr = htonl(V_ULP_MEMIO_ADDR(addr >> 5) |
                                   V_ULPTX_CMD(ULP_MEM_WRITE));
        req->len = htonl(V_ULP_MEMIO_DATA_LEN(PPOD_SIZE >> 5) |
                         V_ULPTX_NFLITS((PPOD_SIZE >> 3) + 1));
}
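/*
 * Each pagepod is pushed to adapter memory with a ULP_MEM_WRITE work request:
 * ulp_mem_io_set_hdr() encodes the destination address in 32-byte units
 * (addr >> 5) and a payload of PPOD_SIZE bytes; the skb carrying the request
 * is then sent on the control queue via cxgb3_ofld_send().
 */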
static int set_ddp_map(struct cxgb3i_ddp_info *ddp, struct pagepod_hdr *hdr,
                       unsigned int idx, unsigned int npods,
                       struct cxgb3i_gather_list *gl)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];
                struct pagepod *ppod;
                int j, pidx;

                /* hold on to the skb until we clear the ddp mapping */
                skb_get(skb);

                ulp_mem_io_set_hdr(skb, pm_addr);
                ppod = (struct pagepod *)
                       (skb->head + sizeof(struct ulp_mem_io));
                memcpy(&(ppod->hdr), hdr, sizeof(struct pagepod_hdr));
                for (pidx = 4 * i, j = 0; j < 5; ++j, ++pidx)
                        ppod->addr[j] = pidx < gl->nelem ?
                                cpu_to_be64(gl->phys_addr[pidx]) : 0UL;

                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
        return 0;
}
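/*
 * The skbs used above are pre-allocated by ddp_alloc_gl_skb() and stay
 * attached to ddp->gl_skb[] so that clear_ddp_map() can reuse them to wipe
 * the same pagepod entries when the tag is released.
 */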
static int clear_ddp_map(struct cxgb3i_ddp_info *ddp, unsigned int idx,
                         unsigned int npods)
{
        unsigned int pm_addr = (idx << PPOD_SIZE_SHIFT) + ddp->llimit;
        int i;

        for (i = 0; i < npods; i++, idx++, pm_addr += PPOD_SIZE) {
                struct sk_buff *skb = ddp->gl_skb[idx];

                ddp->gl_skb[idx] = NULL;
                memset((skb->head + sizeof(struct ulp_mem_io)), 0, PPOD_SIZE);
                ulp_mem_io_set_hdr(skb, pm_addr);
                skb->priority = CPL_PRIORITY_CONTROL;
                cxgb3_ofld_send(ddp->tdev, skb);
        }
        return 0;
}
static inline int ddp_find_unused_entries(struct cxgb3i_ddp_info *ddp,
                                          int start, int max, int count,
                                          struct cxgb3i_gather_list *gl)
{
        unsigned int i, j;

        spin_lock(&ddp->map_lock);
        for (i = start; i <= max;) {
                for (j = 0; j < count; j++) {
                        if (ddp->gl_map[i + j])
                                break;
                }
                if (j == count) {
                        for (j = 0; j < count; j++)
                                ddp->gl_map[i + j] = gl;
                        spin_unlock(&ddp->map_lock);
                        return i;
                }
                i += j + 1;
        }
        spin_unlock(&ddp->map_lock);
        return -EBUSY;
}
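/*
 * ddp_find_unused_entries() does a simple first-fit scan of ddp->gl_map[]
 * under map_lock for a run of 'count' free pagepod slots, marks the run with
 * the gather list on success, and returns the starting index (or a negative
 * errno if no run is free).  ddp_unmark_entries() below is its inverse.
 */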
static inline void ddp_unmark_entries(struct cxgb3i_ddp_info *ddp,
                                      int start, int count)
{
        spin_lock(&ddp->map_lock);
        memset(&ddp->gl_map[start], 0,
               count * sizeof(struct cxgb3i_gather_list *));
        spin_unlock(&ddp->map_lock);
}
static inline void ddp_free_gl_skb(struct cxgb3i_ddp_info *ddp,
                                   int idx, int count)
{
        int i;

        for (i = 0; i < count; i++, idx++)
                if (ddp->gl_skb[idx]) {
                        kfree_skb(ddp->gl_skb[idx]);
                        ddp->gl_skb[idx] = NULL;
                }
}
static inline int ddp_alloc_gl_skb(struct cxgb3i_ddp_info *ddp, int idx,
                                   int count, gfp_t gfp)
{
        int i;

        for (i = 0; i < count; i++) {
                struct sk_buff *skb = alloc_skb(sizeof(struct ulp_mem_io) +
                                                PPOD_SIZE, gfp);
                if (!skb) {
                        ddp_free_gl_skb(ddp, idx, i);
                        return -ENOMEM;
                }
                ddp->gl_skb[idx + i] = skb;
                skb_put(skb, sizeof(struct ulp_mem_io) + PPOD_SIZE);
        }
        return 0;
}
/**
 * cxgb3i_ddp_find_page_index - return ddp page index for a given page size
 * @pgsz: page size
 *
 * return the ddp page index; if no match is found return DDP_PGIDX_MAX.
 */
int cxgb3i_ddp_find_page_index(unsigned long pgsz)
{
        int i;

        for (i = 0; i < DDP_PGIDX_MAX; i++) {
                if (pgsz == (1UL << ddp_page_shift[i]))
                        return i;
        }
        ddp_log_debug("ddp page size 0x%lx not supported.\n", pgsz);
        return DDP_PGIDX_MAX;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_find_page_index);
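/*
 * Example (a sketch): the module init path below resolves the index for the
 * host page size once with
 *
 *      page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
 *
 * and per-connection callers such as cxgb3i_setup_conn_pagesize() use the
 * same lookup for an arbitrary ddp page size.
 */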
static inline void ddp_gl_unmap(struct pci_dev *pdev,
                                struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++)
                pci_unmap_page(pdev, gl->phys_addr[i], PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
}
static inline int ddp_gl_map(struct pci_dev *pdev,
                             struct cxgb3i_gather_list *gl)
{
        int i;

        for (i = 0; i < gl->nelem; i++) {
                gl->phys_addr[i] = pci_map_page(pdev, gl->pages[i], 0,
                                                PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                if (unlikely(pci_dma_mapping_error(pdev, gl->phys_addr[i])))
                        goto unmap;
        }
        return i;

unmap:
        if (i) {
                unsigned int nelem = gl->nelem;

                gl->nelem = i;
                ddp_gl_unmap(pdev, gl);
                gl->nelem = nelem;
        }
        return -ENOMEM;
}
/**
 * cxgb3i_ddp_make_gl - build ddp page buffer list
 * @xferlen: total buffer length
 * @sgl: page buffer scatter-gather list
 * @sgcnt: # of page buffers
 * @pdev: pci_dev, used for pci map
 * @gfp: allocation mode
 *
 * construct a ddp page buffer list from the scsi scatter-gather list.
 * coalesce buffers as much as possible, and obtain dma addresses for
 * each page.
 *
 * Return the cxgb3i_gather_list constructed from the page buffers if the
 * memory can be used for ddp. Return NULL otherwise.
 */
struct cxgb3i_gather_list *cxgb3i_ddp_make_gl(unsigned int xferlen,
                                              struct scatterlist *sgl,
                                              unsigned int sgcnt,
                                              struct pci_dev *pdev,
                                              gfp_t gfp)
{
        struct cxgb3i_gather_list *gl;
        struct scatterlist *sg = sgl;
        struct page *sgpage = sg_page(sg);
        unsigned int sglen = sg->length;
        unsigned int sgoffset = sg->offset;
        unsigned int npages = (xferlen + sgoffset + PAGE_SIZE - 1) >>
                              PAGE_SHIFT;
        int i, j = 0;

        if (xferlen < DDP_THRESHOLD) {
                ddp_log_debug("xfer %u < threshold %u, no ddp.\n",
                              xferlen, DDP_THRESHOLD);
                return NULL;
        }

        gl = kzalloc(sizeof(struct cxgb3i_gather_list) +
                     npages * (sizeof(dma_addr_t) + sizeof(struct page *)),
                     gfp);
        if (!gl)
                return NULL;

        gl->pages = (struct page **)&gl->phys_addr[npages];
        gl->nelem = npages;
        gl->length = xferlen;
        gl->offset = sgoffset;
        gl->pages[0] = sgpage;

        for (i = 1, sg = sg_next(sg); i < sgcnt; i++, sg = sg_next(sg)) {
                struct page *page = sg_page(sg);

                if (sgpage == page && sg->offset == sgoffset + sglen)
                        sglen += sg->length;
                else {
                        /* make sure the sgl is fit for ddp:
                         * each has the same page size, and
                         * all of the middle pages are used completely
                         */
                        if ((j && sgoffset) || ((i != sgcnt - 1) &&
                            ((sglen + sgoffset) & ~PAGE_MASK)))
                                goto error_out;

                        j++;
                        if (j == gl->nelem || sg->offset)
                                goto error_out;
                        gl->pages[j] = page;
                        sglen = sg->length;
                        sgoffset = sg->offset;
                        sgpage = page;
                }
        }
        gl->nelem = ++j;

        if (ddp_gl_map(pdev, gl) < 0)
                goto error_out;

        return gl;

error_out:
        kfree(gl);
        return NULL;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_make_gl);
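/*
 * Illustrative usage only (caller context such as 'sc', 'tid', 'tformat' and
 * 'sw_tag' is assumed, not defined in this file):
 *
 *      struct cxgb3i_gather_list *gl;
 *      u32 tag = sw_tag;
 *
 *      gl = cxgb3i_ddp_make_gl(scsi_in(sc)->length, scsi_in(sc)->table.sgl,
 *                              scsi_in(sc)->table.nents, pdev, GFP_ATOMIC);
 *      if (gl && cxgb3i_ddp_tag_reserve(tdev, tid, tformat, &tag, gl,
 *                                       GFP_ATOMIC) < 0)
 *              cxgb3i_ddp_release_gl(gl, pdev);
 *
 * On success the updated 'tag' is used as the ITT, and
 * cxgb3i_ddp_tag_release() undoes the whole setup when the task completes.
 */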
/**
 * cxgb3i_ddp_release_gl - release a page buffer list
 * @gl: a ddp page buffer list
 * @pdev: pci_dev used for pci_unmap
 *
 * free a ddp page buffer list built by cxgb3i_ddp_make_gl().
 */
void cxgb3i_ddp_release_gl(struct cxgb3i_gather_list *gl,
                           struct pci_dev *pdev)
{
        ddp_gl_unmap(pdev, gl);
        kfree(gl);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_release_gl);
/**
 * cxgb3i_ddp_tag_reserve - set up ddp for a data transfer
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @tformat: tag format
 * @tagp: the s/w tag; if ddp setup is successful, it will be updated with
 *        the ddp/hw tag
 * @gl: the page memory list
 * @gfp: allocation mode
 *
 * ddp setup for a given page buffer list and construct the ddp tag.
 * return 0 if success, < 0 otherwise.
 */
int cxgb3i_ddp_tag_reserve(struct t3cdev *tdev, unsigned int tid,
                           struct cxgb3i_tag_format *tformat, u32 *tagp,
                           struct cxgb3i_gather_list *gl, gfp_t gfp)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        struct pagepod_hdr hdr;
        unsigned int npods;
        int idx = -1, idx_max;
        int err;
        u32 sw_tag = *tagp;
        u32 tag;

        if (page_idx >= DDP_PGIDX_MAX || !ddp || !gl || !gl->nelem ||
            gl->length < DDP_THRESHOLD) {
                ddp_log_debug("pgidx %u, xfer %u/%u, NO ddp.\n",
                              page_idx, gl->length, DDP_THRESHOLD);
                return -EINVAL;
        }

        npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
        idx_max = ddp->nppods - npods + 1;

        if (ddp->idx_last == ddp->nppods)
                idx = ddp_find_unused_entries(ddp, 0, idx_max, npods, gl);
        else {
                idx = ddp_find_unused_entries(ddp, ddp->idx_last + 1,
                                              idx_max, npods, gl);
                if (idx < 0 && ddp->idx_last >= npods)
                        idx = ddp_find_unused_entries(ddp, 0,
                                                      ddp->idx_last - npods + 1,
                                                      npods, gl);
        }
        if (idx < 0) {
                ddp_log_debug("xferlen %u, gl %u, npods %u NO DDP.\n",
                              gl->length, gl->nelem, npods);
                return idx;
        }

        err = ddp_alloc_gl_skb(ddp, idx, npods, gfp);
        if (err < 0)
                goto unmark_entries;

        tag = cxgb3i_ddp_tag_base(tformat, sw_tag);
        tag |= idx << PPOD_IDX_SHIFT;

        hdr.rsvd = 0;
        hdr.vld_tid = htonl(F_PPOD_VALID | V_PPOD_TID(tid));
        hdr.pgsz_tag_clr = htonl(tag & ddp->rsvd_tag_mask);
        hdr.maxoffset = htonl(gl->length);
        hdr.pgoffset = htonl(gl->offset);

        err = set_ddp_map(ddp, &hdr, idx, npods, gl);
        if (err < 0)
                goto free_gl_skb;

        ddp->idx_last = idx;
        ddp_log_debug("xfer %u, gl %u,%u, tid 0x%x, 0x%x -> 0x%x(%u,%u).\n",
                      gl->length, gl->nelem, gl->offset, tid, sw_tag, tag,
                      idx, npods);
        *tagp = tag;
        return 0;

free_gl_skb:
        ddp_free_gl_skb(ddp, idx, npods);
unmark_entries:
        ddp_unmark_entries(ddp, idx, npods);
        return err;
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_reserve);
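/*
 * Note: cxgb3i_ddp_tag_reserve() only claims the pagepod range and programs
 * the mapping; the caller keeps the gather list around and must eventually
 * call cxgb3i_ddp_tag_release(), which clears the pagepods, unmarks the
 * entries and frees the gather list in one place.
 */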
/**
 * cxgb3i_ddp_tag_release - release a ddp tag
 * @tdev: t3cdev adapter
 * @tag: ddp tag
 *
 * ddp cleanup for a given ddp tag and release all the resources held by it
 */
void cxgb3i_ddp_tag_release(struct t3cdev *tdev, u32 tag)
{
        struct cxgb3i_ddp_info *ddp = tdev->ulp_iscsi;
        u32 idx;

        if (!ddp) {
                ddp_log_error("release ddp tag 0x%x, ddp NULL.\n", tag);
                return;
        }

        idx = (tag >> PPOD_IDX_SHIFT) & ddp->idx_mask;
        if (idx < ddp->nppods) {
                struct cxgb3i_gather_list *gl = ddp->gl_map[idx];
                unsigned int npods;

                if (!gl) {
                        ddp_log_error("release ddp 0x%x, idx 0x%x, gl NULL.\n",
                                      tag, idx);
                        return;
                }
                npods = (gl->nelem + PPOD_PAGES_MAX - 1) >> PPOD_PAGES_SHIFT;
                ddp_log_debug("ddp tag 0x%x, release idx 0x%x, npods %u.\n",
                              tag, idx, npods);
                clear_ddp_map(ddp, idx, npods);
                ddp_unmark_entries(ddp, idx, npods);
                cxgb3i_ddp_release_gl(gl, ddp->pdev);
        } else
                ddp_log_error("ddp tag 0x%x, idx 0x%x > max 0x%x.\n",
                              tag, idx, ddp->nppods);
}
EXPORT_SYMBOL_GPL(cxgb3i_ddp_tag_release);
static int setup_conn_pgidx(struct t3cdev *tdev, unsigned int tid, int pg_idx,
                            int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = pg_idx < DDP_PGIDX_MAX ? pg_idx : 0;

        if (!skb)
                return -ENOMEM;

        /* set up ulp submode and page size */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->word = htons(31);
        req->mask = cpu_to_be64(0xF0000000);
        req->val = cpu_to_be64(val << 28);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}
/**
 * cxgb3i_setup_conn_host_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 *
 * set up the ddp page size based on the host PAGE_SIZE for a connection
 * identified by tid
 */
int cxgb3i_setup_conn_host_pagesize(struct t3cdev *tdev, unsigned int tid,
                                    int reply)
{
        return setup_conn_pgidx(tdev, tid, page_idx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_host_pagesize);
/**
 * cxgb3i_setup_conn_pagesize - setup the conn.'s ddp page size
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @reply: request reply from h/w
 * @pgsz: ddp page size
 *
 * set up the ddp page size for a connection identified by tid
 */
int cxgb3i_setup_conn_pagesize(struct t3cdev *tdev, unsigned int tid,
                               int reply, unsigned long pgsz)
{
        int pgidx = cxgb3i_ddp_find_page_index(pgsz);

        return setup_conn_pgidx(tdev, tid, pgidx, reply);
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_pagesize);
/**
 * cxgb3i_setup_conn_digest - setup conn. digest setting
 * @tdev: t3cdev adapter
 * @tid: connection id
 * @hcrc: header digest enabled
 * @dcrc: data digest enabled
 * @reply: request reply from h/w
 *
 * set up the iscsi digest settings for a connection identified by tid
 */
int cxgb3i_setup_conn_digest(struct t3cdev *tdev, unsigned int tid,
                             int hcrc, int dcrc, int reply)
{
        struct sk_buff *skb = alloc_skb(sizeof(struct cpl_set_tcb_field),
                                        GFP_KERNEL);
        struct cpl_set_tcb_field *req;
        u64 val = (hcrc ? 1 : 0) | (dcrc ? 2 : 0);

        if (!skb)
                return -ENOMEM;

        /* set up ulp submode: header and data digest enables */
        req = (struct cpl_set_tcb_field *)skb_put(skb, sizeof(*req));
        req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
        OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
        req->reply = V_NO_REPLY(reply ? 0 : 1);
        req->word = htons(31);
        req->mask = cpu_to_be64(0x0F000000);
        req->val = cpu_to_be64(val << 24);
        skb->priority = CPL_PRIORITY_CONTROL;

        cxgb3_ofld_send(tdev, skb);
        return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_setup_conn_digest);
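/*
 * Illustrative sequence for bringing up ddp on a new connection (names such
 * as 'csk', 'hcrc' and 'dcrc' are assumptions, not defined in this file):
 *
 *      cxgb3i_setup_conn_host_pagesize(tdev, csk->tid, 0);
 *      cxgb3i_setup_conn_digest(tdev, csk->tid, hcrc, dcrc, 0);
 *
 * Both calls are fire-and-forget CPL_SET_TCB_FIELD requests unless 'reply'
 * is non-zero, in which case the h/w generates a completion.
 */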
static int ddp_init(struct t3cdev *tdev)
{
        struct cxgb3i_ddp_info *ddp;
        struct ulp_iscsi_info uinfo;
        unsigned int ppmax, bits;
        int i, err;
        static int vers_printed;

        if (!vers_printed) {
                printk(KERN_INFO "%s", version);
                vers_printed = 1;
        }

        err = tdev->ctl(tdev, ULP_ISCSI_GET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_error("%s, failed to get iscsi param err=%d.\n",
                              tdev->name, err);
                return err;
        }

        ppmax = (uinfo.ulimit - uinfo.llimit + 1) >> PPOD_SIZE_SHIFT;
        bits = __ilog2_u32(ppmax) + 1;
        if (bits > PPOD_IDX_MAX_SIZE)
                bits = PPOD_IDX_MAX_SIZE;
        ppmax = (1 << (bits - 1)) - 1;

        ddp = cxgb3i_alloc_big_mem(sizeof(struct cxgb3i_ddp_info) +
                                   ppmax *
                                        (sizeof(struct cxgb3i_gather_list *) +
                                         sizeof(struct sk_buff *)),
                                   GFP_KERNEL);
        if (!ddp) {
                ddp_log_warn("%s unable to alloc ddp %u, ddp disabled.\n",
                             tdev->name, ppmax);
                return -ENOMEM;
        }
        ddp->gl_map = (struct cxgb3i_gather_list **)(ddp + 1);
        ddp->gl_skb = (struct sk_buff **)(((char *)ddp->gl_map) +
                                          ppmax *
                                          sizeof(struct cxgb3i_gather_list *));
        spin_lock_init(&ddp->map_lock);

        ddp->tdev = tdev;
        ddp->pdev = uinfo.pdev;
        ddp->max_txsz = min_t(unsigned int, uinfo.max_txsz, ULP2_MAX_PKT_SIZE);
        ddp->max_rxsz = min_t(unsigned int, uinfo.max_rxsz, ULP2_MAX_PKT_SIZE);
        ddp->llimit = uinfo.llimit;
        ddp->ulimit = uinfo.ulimit;
        ddp->nppods = ppmax;
        ddp->idx_last = ppmax;
        ddp->idx_bits = bits;
        ddp->idx_mask = (1 << bits) - 1;
        ddp->rsvd_tag_mask = (1 << (bits + PPOD_IDX_SHIFT)) - 1;

        uinfo.tagmask = ddp->idx_mask << PPOD_IDX_SHIFT;
        for (i = 0; i < DDP_PGIDX_MAX; i++)
                uinfo.pgsz_factor[i] = ddp_page_order[i];
        uinfo.ulimit = uinfo.llimit + (ppmax << PPOD_SIZE_SHIFT);

        err = tdev->ctl(tdev, ULP_ISCSI_SET_PARAMS, &uinfo);
        if (err < 0) {
                ddp_log_warn("%s unable to set iscsi param err=%d, "
                             "ddp disabled.\n", tdev->name, err);
                goto free_ddp_map;
        }

        tdev->ulp_iscsi = ddp;

        /* add to the list */
        write_lock(&cxgb3i_ddp_rwlock);
        list_add_tail(&ddp->list, &cxgb3i_ddp_list);
        write_unlock(&cxgb3i_ddp_rwlock);

        ddp_log_info("nppods %u (0x%x ~ 0x%x), bits %u, mask 0x%x,0x%x "
                     "pkt %u,%u.\n",
                     ppmax, ddp->llimit, ddp->ulimit, ddp->idx_bits,
                     ddp->idx_mask, ddp->rsvd_tag_mask,
                     ddp->max_txsz, ddp->max_rxsz);
        return 0;

free_ddp_map:
        cxgb3i_free_big_mem(ddp);
        return err;
}
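/*
 * The tag geometry set up above: 'bits' is the number of bits needed to
 * index ppmax pagepods (capped at PPOD_IDX_MAX_SIZE), idx_mask covers those
 * bits, and rsvd_tag_mask additionally covers the low PPOD_IDX_SHIFT bits,
 * i.e. everything the h/w reserves out of an ITT/TTT.  uinfo.tagmask tells
 * the firmware which tag bits carry the pagepod index.
 */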
/**
 * cxgb3i_adapter_ddp_init - initialize the adapter's ddp resource
 * @tdev: t3cdev adapter
 * @tformat: tag format
 * @txsz: max tx pkt size, filled in by this func.
 * @rxsz: max rx pkt size, filled in by this func.
 *
 * initialize the ddp pagepod manager for a given adapter if needed and
 * setup the tag format for a given iscsi entity
 */
int cxgb3i_adapter_ddp_init(struct t3cdev *tdev,
                            struct cxgb3i_tag_format *tformat,
                            unsigned int *txsz, unsigned int *rxsz)
{
        struct cxgb3i_ddp_info *ddp;
        unsigned char idx_bits;

        if (!tdev->ulp_iscsi) {
                int err = ddp_init(tdev);

                if (err < 0)
                        return err;
        }
        ddp = (struct cxgb3i_ddp_info *)tdev->ulp_iscsi;

        idx_bits = 32 - tformat->sw_bits;
        tformat->rsvd_bits = ddp->idx_bits;
        tformat->rsvd_shift = PPOD_IDX_SHIFT;
        tformat->rsvd_mask = (1 << tformat->rsvd_bits) - 1;

        ddp_log_info("tag format: sw %u, rsvd %u,%u, mask 0x%x.\n",
                     tformat->sw_bits, tformat->rsvd_bits,
                     tformat->rsvd_shift, tformat->rsvd_mask);

        *txsz = ddp->max_txsz;
        *rxsz = ddp->max_rxsz;
        ddp_log_info("ddp max pkt size: %u, %u.\n",
                     ddp->max_txsz, ddp->max_rxsz);
        return 0;
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_init);
static void ddp_release(struct cxgb3i_ddp_info *ddp)
{
        int i = 0;
        struct t3cdev *tdev = ddp->tdev;

        tdev->ulp_iscsi = NULL;
        while (i < ddp->nppods) {
                struct cxgb3i_gather_list *gl = ddp->gl_map[i];

                if (gl) {
                        int npods = (gl->nelem + PPOD_PAGES_MAX - 1)
                                    >> PPOD_PAGES_SHIFT;

                        kfree(gl);
                        ddp_free_gl_skb(ddp, i, npods);
                        i += npods;
                } else
                        i++;
        }
        cxgb3i_free_big_mem(ddp);
}
/**
 * cxgb3i_adapter_ddp_cleanup - release the adapter's ddp resource
 * @tdev: t3cdev adapter
 *
 * release all the resources held by the ddp pagepod manager for a given
 * adapter, if any
 */
void cxgb3i_adapter_ddp_cleanup(struct t3cdev *tdev)
{
        struct cxgb3i_ddp_info *ddp, *found = NULL;

        /* remove from the list */
        write_lock(&cxgb3i_ddp_rwlock);
        list_for_each_entry(ddp, &cxgb3i_ddp_list, list) {
                if (ddp->tdev == tdev) {
                        list_del(&ddp->list);
                        found = ddp;
                        break;
                }
        }
        write_unlock(&cxgb3i_ddp_rwlock);

        if (found)
                ddp_release(found);
}
EXPORT_SYMBOL_GPL(cxgb3i_adapter_ddp_cleanup);
/**
 * cxgb3i_ddp_init_module - module init entry point
 *
 * initialize any driver wide global data structures
 */
static int __init cxgb3i_ddp_init_module(void)
{
        page_idx = cxgb3i_ddp_find_page_index(PAGE_SIZE);
        ddp_log_info("system PAGE_SIZE %lu, ddp idx %u.\n",
                     PAGE_SIZE, page_idx);
        return 0;
}
/**
 * cxgb3i_ddp_exit_module - module cleanup/exit entry point
 *
 * go through the ddp list and release any resources held.
 */
static void __exit cxgb3i_ddp_exit_module(void)
{
        struct cxgb3i_ddp_info *ddp, *tmp;

        /* release all ddp managers, if there are any */
        write_lock(&cxgb3i_ddp_rwlock);
        list_for_each_entry_safe(ddp, tmp, &cxgb3i_ddp_list, list) {
                list_del(&ddp->list);
                ddp_release(ddp);
        }
        write_unlock(&cxgb3i_ddp_rwlock);
}
module_init(cxgb3i_ddp_init_module);
module_exit(cxgb3i_ddp_exit_module);