#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include "rds.h"
#include "rdma_transport.h"
#define RDS_FMR_SIZE			256
#define RDS_FMR_POOL_SIZE		4096

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

extern struct list_head rds_ib_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
/* page offset of the final full frag that fits in the page */
#define RDS_PAGE_LAST_OFF (((PAGE_SIZE / RDS_FRAG_SIZE) - 1) * RDS_FRAG_SIZE)
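/*
 * Worked example (illustrative numbers only, since RDS_FRAG_SIZE is
 * defined in rds.h): if PAGE_SIZE were 4096 and RDS_FRAG_SIZE were
 * 2048, a page would hold 4096 / 2048 = 2 full frags, and the last
 * full frag would start at offset (2 - 1) * 2048 = 2048.
 */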
struct rds_page_frag {
	struct list_head	f_item;
	unsigned long		f_offset;
};
struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct rds_incoming	ii_inc;
};
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_credit;	/* non-zero enables flow ctl */
};
struct rds_ib_send_work {
	struct rds_message	*s_rm;
	struct rds_rdma_op	*s_op;
	struct ib_send_wr	s_wr;
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};
struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};
struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};
struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;

	struct rds_ib_work_ring	i_send_ring;
	struct rds_message	*i_rm;
	struct rds_header	*i_send_hdrs;
	struct rds_ib_send_work	*i_sends;

	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	struct rds_header	*i_recv_hdrs;
	struct rds_ib_recv_work	*i_recvs;
	struct rds_page_frag	i_frag;
	u64			i_ack_recv;	/* last ACK received */

	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	unsigned long		i_ack_queued;
	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;
	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;
	long			i_unsignaled_bytes;
};
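/*
 * Sketch of how i_unsignaled_wrs is meant to batch completions (a
 * hypothetical sender loop, not quoted from ib_send.c): request a
 * signaled completion only once per batch, e.g.
 *
 *	if (ic->i_unsignaled_wrs-- == 0) {
 *		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
 *		send->s_wr.send_flags |= IB_SEND_SIGNALED;
 *	}
 *
 * so the send CQ fires roughly once per rds_ib_sysctl_max_unsig_wrs
 * work requests instead of once per request.
 */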
/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
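/*
 * Worked example: a value built as
 *	IB_SET_SEND_CREDITS(5) | IB_SET_POST_CREDITS(3) == 0x00030005
 * decodes back to IB_GET_SEND_CREDITS(v) == 5 (low 16 bits) and
 * IB_GET_POST_CREDITS(v) == 3 (high 16 bits).
 */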
struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
};
struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct rds_ib_mr_pool	*mr_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_fmrs;
	unsigned int		max_wrs;
	spinlock_t		spinlock;	/* protect the above */
};
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1
/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
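/*
 * Illustrative check (a sketch of a CQ handler, not quoted code): no
 * ring slot ever produces a wr_id of all ones, so a completion handler
 * can recognize ACK completions with
 *
 *	if (wc->wr_id == RDS_IB_ACK_WR_ID)
 *		rds_ib_ack_send_complete(ic);
 */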
struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_tx_cq_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_call;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_alloc;
	uint64_t	s_ib_rdma_mr_free;
	uint64_t	s_ib_rdma_mr_used;
	uint64_t	s_ib_rdma_mr_pool_flush;
	uint64_t	s_ib_rdma_mr_pool_wait;
	uint64_t	s_ib_rdma_mr_pool_depleted;
};
extern struct workqueue_struct *rds_ib_wq;
/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_cpu(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu
static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
		struct scatterlist *sg, unsigned int sg_dma_len, int direction)
{
	unsigned int i;

	for (i = 0; i < sg_dma_len; ++i) {
		ib_dma_sync_single_for_device(dev,
				ib_sg_dma_address(dev, &sg[i]),
				ib_sg_dma_len(dev, &sg[i]),
				direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
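/*
 * Illustrative call (hypothetical caller): to hand a received
 * scatterlist back to the CPU, code built against either ib_verbs.h
 * variant can uniformly write
 *
 *	ib_dma_sync_sg_for_cpu(ic->i_cm_id->device, sg, sg_dma_len,
 *			       DMA_FROM_DEVICE);
 *
 * and gets the per-entry fallback above when the kernel lacks the
 * sg helpers.
 */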
extern struct rds_transport rds_ib_transport;
extern void rds_ib_add_one(struct ib_device *device);
extern void rds_ib_remove_one(struct ib_device *device);
extern struct ib_client rds_ib_client;

extern unsigned int fmr_pool_size;
extern unsigned int fmr_message_size;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_connect(struct rds_connection *conn);
void rds_ib_conn_shutdown(struct rds_connection *conn);
void rds_ib_state_change(struct sock *sk);
int __init rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);
#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
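/*
 * Example use (illustrative):
 *
 *	rds_ib_conn_error(conn, "sending ack failed (%d)\n", ret);
 *
 * expands, via string-literal concatenation, to
 * __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: sending ack failed (%d)\n", ret).
 */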
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock);
static inline void rds_ib_destroy_nodev_conns(void)
{
	__rds_ib_destroy_conns(&ib_nodev_conns, &ib_nodev_conns_lock);
}
static inline void rds_ib_destroy_conns(struct rds_ib_device *rds_ibdev)
{
	__rds_ib_destroy_conns(&rds_ibdev->conn_list, &rds_ibdev->spinlock);
}
struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *);
void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo);
void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *);
void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
		    struct rds_sock *rs, u32 *key_ret);
void rds_ib_sync_mr(void *trans_private, int dir);
void rds_ib_free_mr(void *trans_private, int invalidate);
void rds_ib_flush_mrs(void);
int __init rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv(struct rds_connection *conn);
int rds_ib_recv_refill(struct rds_connection *conn, gfp_t kptr_gfp,
		       gfp_t page_gfp, int prefill);
void rds_ib_inc_purge(struct rds_incoming *inc);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *iov,
			    size_t size);
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
void rds_ib_xmit_complete(struct rds_connection *conn);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted);
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
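/*
 * Example use (illustrative): a sender that finds the ring full could
 * count the event with rds_ib_stats_inc(s_ib_tx_ring_full), which bumps
 * that member of this CPU's rds_ib_stats via rds_stats_inc_which().
 */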
int __init rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;
extern ctl_table rds_ib_sysctl_table[];
/*
 * Helper functions for getting/setting the header and data SGEs in
 * RDS packets (not RDMA)
 */
static inline struct ib_sge *
rds_ib_header_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
{
	return &sge[0];
}

static inline struct ib_sge *
rds_ib_data_sge(struct rds_ib_connection *ic, struct ib_sge *sge)
{
	return &sge[1];
}
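/*
 * Illustrative use (hypothetical snippet): when filling in the
 * two-entry SGE list of a receive work request, the helpers keep the
 * header/data layout in one place:
 *
 *	struct ib_sge *sge = rds_ib_data_sge(ic, recv->r_sge);
 *	sge->length = RDS_FRAG_SIZE;
 *
 * i.e. sge[0] carries the struct rds_header and sge[1] the payload
 * fragment, matching RDS_IB_RECV_SGE == 2 above.
 */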