#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_FASTREG_SIZE		20
#define RDS_FASTREG_POOL_SIZE		2048

#define RDS_IW_MAX_SGE			8
#define RDS_IW_RECV_SGE			2

#define RDS_IW_DEFAULT_RECV_WR		1024
#define RDS_IW_DEFAULT_SEND_WR		256

#define RDS_IW_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_iw_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
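/*
 * Worked example (illustrative, assuming RDS_FRAG_SIZE is 4096 as defined in
 * rds.h): with 4K pages PAGE_SIZE / RDS_FRAG_SIZE is 1, so RDS_PAGE_LAST_OFF
 * is 0 and each page holds a single fragment; with 64K pages it would be
 * (16 - 1) * 4096 = 61440, the offset of the last whole fragment in the page.
 */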
struct rds_page_frag {
	struct list_head	f_item;
	unsigned long		f_offset;
};
struct rds_iw_incoming {
	struct list_head	ii_frags;
	struct rds_incoming	ii_inc;
};
struct rds_iw_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_credit;		/* non-zero enables flow ctl */
};
struct rds_iw_scatterlist {
	struct scatterlist	*list;
	unsigned int		dma_npages;
};
struct rds_iw_mapping {
	spinlock_t		m_lock;	/* protect the mapping struct */
	struct list_head	m_list;
	struct rds_iw_mr	*m_mr;
	struct rds_iw_scatterlist m_sg;
};
struct rds_iw_send_work {
	struct rds_message	*s_rm;

	/* We should really put these into a union: */
	struct rds_rdma_op	*s_op;
	struct rds_iw_mapping	*s_mapping;
	struct ib_fast_reg_page_list *s_page_list;
	unsigned char		s_remap_count;

	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IW_MAX_SGE];
	unsigned long		s_queued;
};
struct rds_iw_recv_work {
	struct rds_iw_incoming	*r_iwinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};
struct rds_iw_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};
struct rds_iw_connection {

	struct list_head	iw_node;
	struct rds_iw_device	*rds_iwdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_mr		*i_mr;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	struct rds_iw_work_ring	i_send_ring;
	struct rds_message	*i_rm;
	struct rds_header	*i_send_hdrs;
	struct rds_iw_send_work *i_sends;

	struct mutex		i_recv_mutex;
	struct rds_iw_work_ring	i_recv_ring;
	struct rds_iw_incoming	*i_iwinc;
	struct rds_header	*i_recv_hdrs;
	struct rds_iw_recv_work *i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */
	unsigned int		i_dma_local_lkey:1;
	unsigned int		i_fastreg_posted:1; /* fastreg posted on this connection */
	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
	long			i_unsignaled_bytes;
};
/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
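
/*
 * Illustrative sketch only, not part of the original header: one way the
 * packed credit word described above can be updated with cmpxchg.  The
 * helper name and the bare atomic_t parameter are assumptions for the
 * example; the real credit handling is done by rds_iw_send_grab_credits(),
 * declared later in this header.
 */
static inline int rds_iw_example_take_send_credits(atomic_t *credits, u32 wanted)
{
	unsigned int oldval, newval;

	do {
		oldval = atomic_read(credits);
		/* Peer has not advertised enough send credits. */
		if (IB_GET_SEND_CREDITS(oldval) < wanted)
			return 0;
		/*
		 * Send credits live in the low 16 bits, so subtracting
		 * 'wanted' consumes them without touching the posted half.
		 */
		newval = oldval - wanted;
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return wanted;
}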
struct rds_iw_cm_id {
	struct list_head	list;
	struct rdma_cm_id	*cm_id;
};
struct rds_iw_device {
	struct list_head	list;
	struct list_head	cm_id_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct rds_iw_mr_pool	*mr_pool;
	unsigned int		max_wrs;
	unsigned int		dma_local_lkey:1;
	spinlock_t		spinlock;	/* protect the above */
};
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IW_ACK_WR_ID	((u64)0xffffffffffffffffULL)
#define RDS_IW_FAST_REG_WR_ID	((u64)0xefefefefefefefefULL)
#define RDS_IW_LOCAL_INV_WR_ID	((u64)0xdfdfdfdfdfdfdfdfULL)
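
/*
 * Illustrative sketch only, not part of the original header: completion
 * handlers can tell these special work requests apart from ordinary ring
 * entries by comparing the completed wr_id against the magic values above.
 * The helper name is an assumption for the example.
 */
static inline int rds_iw_example_wr_id_is_magic(u64 wr_id)
{
	return wr_id == RDS_IW_ACK_WR_ID ||
	       wr_id == RDS_IW_FAST_REG_WR_ID ||
	       wr_id == RDS_IW_LOCAL_INV_WR_ID;
}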
struct rds_iw_statistics {
	uint64_t	s_iw_connect_raced;
	uint64_t	s_iw_listen_closed_stale;
	uint64_t	s_iw_tx_cq_call;
	uint64_t	s_iw_tx_cq_event;
	uint64_t	s_iw_tx_ring_full;
	uint64_t	s_iw_tx_throttle;
	uint64_t	s_iw_tx_sg_mapping_failure;
	uint64_t	s_iw_tx_stalled;
	uint64_t	s_iw_tx_credit_updates;
	uint64_t	s_iw_rx_cq_call;
	uint64_t	s_iw_rx_cq_event;
	uint64_t	s_iw_rx_ring_empty;
	uint64_t	s_iw_rx_refill_from_cq;
	uint64_t	s_iw_rx_refill_from_thread;
	uint64_t	s_iw_rx_alloc_limit;
	uint64_t	s_iw_rx_credit_updates;
	uint64_t	s_iw_ack_sent;
	uint64_t	s_iw_ack_send_failure;
	uint64_t	s_iw_ack_send_delayed;
	uint64_t	s_iw_ack_send_piggybacked;
	uint64_t	s_iw_ack_received;
	uint64_t	s_iw_rdma_mr_alloc;
	uint64_t	s_iw_rdma_mr_free;
	uint64_t	s_iw_rdma_mr_used;
	uint64_t	s_iw_rdma_mr_pool_flush;
	uint64_t	s_iw_rdma_mr_pool_wait;
	uint64_t	s_iw_rdma_mr_pool_depleted;
};
extern struct workqueue_struct *rds_iw_wq;
/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_iw_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_iw_dma_sync_sg_for_cpu
static inline void rds_iw_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_iw_dma_sync_sg_for_device
static inline u32 rds_iw_local_dma_lkey(struct rds_iw_connection *ic)
{
	return ic->i_dma_local_lkey ? ic->i_cm_id->device->local_dma_lkey : ic->i_mr->lkey;
}
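
/*
 * Example use (illustrative): when an ib_sge is built for a send or receive,
 * its lkey is taken from this helper, e.g.
 *
 *	sge->lkey = rds_iw_local_dma_lkey(ic);
 */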
extern struct rds_transport rds_iw_transport;
extern void rds_iw_add_one(struct ib_device *device);
extern void rds_iw_remove_one(struct ib_device *device);
extern struct ib_client rds_iw_client;

extern unsigned int fastreg_pool_size;
extern unsigned int fastreg_message_size;

extern spinlock_t iw_nodev_conns_lock;
extern struct list_head iw_nodev_conns;
int rds_iw_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_iw_conn_free(void *arg);
int rds_iw_conn_connect(struct rds_connection *conn);
void rds_iw_conn_shutdown(struct rds_connection *conn);
void rds_iw_state_change(struct sock *sk);
int __init rds_iw_listen_init(void);
void rds_iw_listen_stop(void);
void __rds_iw_conn_error(struct rds_connection *conn, const char *, ...);
int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_iw_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_iw_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);
#define rds_iw_conn_error(conn, fmt...) \
	__rds_iw_conn_error(conn, KERN_WARNING "RDS/IW: " fmt)
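
/*
 * Example use (illustrative): callers pass only a format string and
 * arguments; the macro prepends KERN_WARNING and the "RDS/IW: " prefix, e.g.
 *
 *	rds_iw_conn_error(conn, "connect failed (%d), retrying\n", ret);
 */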
int rds_iw_update_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
void rds_iw_add_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void rds_iw_remove_conn(struct rds_iw_device *rds_iwdev, struct rds_connection *conn);
void __rds_iw_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_iw_destroy_nodev_conns(void)
{
	__rds_iw_destroy_conns(&iw_nodev_conns, &iw_nodev_conns_lock);
}
static inline void rds_iw_destroy_conns(struct rds_iw_device *rds_iwdev)
{
	__rds_iw_destroy_conns(&rds_iwdev->conn_list, &rds_iwdev->spinlock);
}
struct rds_iw_mr_pool *rds_iw_create_mr_pool(struct rds_iw_device *);
void rds_iw_get_mr_info(struct rds_iw_device *rds_iwdev, struct rds_info_rdma_connection *iinfo);
void rds_iw_destroy_mr_pool(struct rds_iw_mr_pool *);
void *rds_iw_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_iw_sync_mr(void *trans_private, int dir);
void rds_iw_free_mr(void *trans_private, int invalidate);
void rds_iw_flush_mrs(void);
void rds_iw_remove_cm_id(struct rds_iw_device *rds_iwdev, struct rdma_cm_id *cm_id);
int __init rds_iw_recv_init(void);
void rds_iw_recv_exit(void);
int rds_iw_recv(struct rds_connection *conn);
int rds_iw_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_iw_inc_purge(struct rds_incoming *inc);
void rds_iw_inc_free(struct rds_incoming *inc);
int rds_iw_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_iw_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_recv_init_ring(struct rds_iw_connection *ic);
void rds_iw_recv_clear_ring(struct rds_iw_connection *ic);
void rds_iw_recv_init_ack(struct rds_iw_connection *ic);
void rds_iw_attempt_ack(struct rds_iw_connection *ic);
void rds_iw_ack_send_complete(struct rds_iw_connection *ic);
u64 rds_iw_piggyb_ack(struct rds_iw_connection *ic);
void rds_iw_ring_init(struct rds_iw_work_ring *ring, u32 nr);
void rds_iw_ring_resize(struct rds_iw_work_ring *ring, u32 nr);
u32 rds_iw_ring_alloc(struct rds_iw_work_ring *ring, u32 val, u32 *pos);
void rds_iw_ring_free(struct rds_iw_work_ring *ring, u32 val);
void rds_iw_ring_unalloc(struct rds_iw_work_ring *ring, u32 val);
int rds_iw_ring_empty(struct rds_iw_work_ring *ring);
int rds_iw_ring_low(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_oldest(struct rds_iw_work_ring *ring);
u32 rds_iw_ring_completed(struct rds_iw_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_iw_ring_empty_wait;
void rds_iw_xmit_complete(struct rds_connection *conn);
int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_iw_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_iw_send_init_ring(struct rds_iw_connection *ic);
void rds_iw_send_clear_ring(struct rds_iw_connection *ic);
int rds_iw_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
void rds_iw_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_iw_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_iw_send_grab_credits(struct rds_iw_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
DECLARE_PER_CPU(struct rds_iw_statistics, rds_iw_stats);
#define rds_iw_stats_inc(member) rds_stats_inc_which(rds_iw_stats, member)
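
/*
 * Example use (illustrative): each counter in struct rds_iw_statistics is
 * bumped through this wrapper on the local CPU, e.g.
 *
 *	rds_iw_stats_inc(s_iw_tx_ring_full);
 */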
unsigned int rds_iw_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
int __init rds_iw_sysctl_init(void);
void rds_iw_sysctl_exit(void);
extern unsigned long rds_iw_sysctl_max_send_wr;
extern unsigned long rds_iw_sysctl_max_recv_wr;
extern unsigned long rds_iw_sysctl_max_unsig_wrs;
extern unsigned long rds_iw_sysctl_max_unsig_bytes;
extern unsigned long rds_iw_sysctl_max_recv_allocation;
extern unsigned int rds_iw_sysctl_flow_control;
extern ctl_table rds_iw_sysctl_table[];
/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_iw_header_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_iw_data_sge(struct rds_iw_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}