/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _RDS_IB_H
#define _RDS_IB_H

#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "rds.h"
#include "rdma_transport.h"

#define RDS_IB_MAX_SGE			8
#define RDS_IB_RECV_SGE			2

#define RDS_IB_DEFAULT_RECV_WR		1024
#define RDS_IB_DEFAULT_SEND_WR		256
#define RDS_IB_DEFAULT_FR_WR		256
#define RDS_IB_DEFAULT_FR_INV_WR	256

#define RDS_IB_DEFAULT_RETRY_COUNT	1

#define RDS_IB_SUPPORTED_PROTOCOLS	0x00000003	/* minor versions supported */

#define RDS_IB_RECYCLE_BATCH_COUNT	32

#define RDS_IB_WC_MAX			32

extern struct rw_semaphore rds_ib_devices_lock;
extern struct list_head rds_ib_devices;
/*
 * IB posts RDS_FRAG_SIZE fragments of pages to the receive queues to
 * try and minimize the amount of memory tied up in both the device and
 * socket receive queues.
 */
struct rds_page_frag {
	struct list_head	f_item;
	struct list_head	f_cache_entry;
	struct scatterlist	f_sg;
};
struct rds_ib_incoming {
	struct list_head	ii_frags;
	struct list_head	ii_cache_entry;
	struct rds_incoming	ii_inc;
};
struct rds_ib_cache_head {
	struct list_head	*first;
	unsigned long		count;
};
struct rds_ib_refill_cache {
	struct rds_ib_cache_head __percpu *percpu;
	struct list_head	*xfer;
	struct list_head	*ready;
};
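
/*
 * A sketch of how the refill cache above is intended to work (the
 * authoritative logic lives in ib_recv.c): frees land on the lockless
 * per-cpu lists behind ->percpu, are spliced in batches of
 * RDS_IB_RECYCLE_BATCH_COUNT onto ->xfer, and the refill path then
 * claims ->xfer as its private ->ready list to pop recycled frags and
 * incs from without taking a lock.
 */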
struct rds_ib_connect_private {
	/* Add new fields at the end, and don't permute existing fields. */
	__be32			dp_saddr;
	__be32			dp_daddr;
	u8			dp_protocol_major;
	u8			dp_protocol_minor;
	__be16			dp_protocol_minor_mask; /* bitmask */
	__be32			dp_reserved1;
	__be64			dp_ack_seq;
	__be32			dp_credit;	/* non-zero enables flow ctl */
};
struct rds_ib_send_work {
	void			*s_op;
	union {
		struct ib_send_wr	s_wr;
		struct ib_rdma_wr	s_rdma_wr;
		struct ib_atomic_wr	s_atomic_wr;
	};
	struct ib_sge		s_sge[RDS_IB_MAX_SGE];
	unsigned long		s_queued;
};
struct rds_ib_recv_work {
	struct rds_ib_incoming	*r_ibinc;
	struct rds_page_frag	*r_frag;
	struct ib_recv_wr	r_wr;
	struct ib_sge		r_sge[2];
};
struct rds_ib_work_ring {
	u32		w_nr;
	u32		w_alloc_ptr;
	u32		w_alloc_ctr;
	u32		w_free_ptr;
	atomic_t	w_free_ctr;
};
/* Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
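
/*
 * Illustrative sketch (not part of the original header) of how the ack
 * state above is meant to be used: the recv completion path accumulates
 * ack information across a burst of completions and applies it once,
 * roughly:
 *
 *	struct rds_ib_ack_state state = { };
 *
 *	for (each polled recv completion wc)
 *		rds_ib_recv_cqe_handler(ic, wc, &state);
 *	if (state.ack_next_valid)
 *		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
 */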
struct rds_ib_device;
struct rds_ib_connection {

	struct list_head	ib_node;
	struct rds_ib_device	*rds_ibdev;
	struct rds_connection	*conn;

	/* alphabet soup, IBTA style */
	struct rdma_cm_id	*i_cm_id;
	struct ib_pd		*i_pd;
	struct ib_cq		*i_send_cq;
	struct ib_cq		*i_recv_cq;
	struct ib_wc		i_send_wc[RDS_IB_WC_MAX];
	struct ib_wc		i_recv_wc[RDS_IB_WC_MAX];

	/* To control the number of wrs from fastreg */
	atomic_t		i_fastreg_wrs;
	atomic_t		i_fastunreg_wrs;

	/* interrupt handling */
	struct tasklet_struct	i_send_tasklet;
	struct tasklet_struct	i_recv_tasklet;

	/* tx */
	struct rds_ib_work_ring	i_send_ring;
	struct rm_data_op	*i_data_op;
	struct rds_header	*i_send_hdrs;
	dma_addr_t		i_send_hdrs_dma;
	struct rds_ib_send_work *i_sends;
	atomic_t		i_signaled_sends;

	/* rx */
	struct mutex		i_recv_mutex;
	struct rds_ib_work_ring	i_recv_ring;
	struct rds_ib_incoming	*i_ibinc;
	u32			i_recv_data_rem;
	struct rds_header	*i_recv_hdrs;
	dma_addr_t		i_recv_hdrs_dma;
	struct rds_ib_recv_work *i_recvs;
	u64			i_ack_recv;	/* last ACK received */
	struct rds_ib_refill_cache i_cache_incs;
	struct rds_ib_refill_cache i_cache_frags;
	atomic_t		i_cache_allocs;

	/* sending acks */
	unsigned long		i_ack_flags;
#ifdef KERNEL_HAS_ATOMIC64
	atomic64_t		i_ack_next;	/* next ACK to send */
#else
	spinlock_t		i_ack_lock;	/* protect i_ack_next */
	u64			i_ack_next;	/* next ACK to send */
#endif
	struct rds_header	*i_ack;
	struct ib_send_wr	i_ack_wr;
	struct ib_sge		i_ack_sge;
	dma_addr_t		i_ack_dma;
	unsigned long		i_ack_queued;

	/* Flow control related information
	 *
	 * Our algorithm uses a pair of variables that we need to access
	 * atomically - one for the send credits, and one for the posted
	 * recv credits we need to transfer to the remote.
	 * Rather than protect them using a slow spinlock, we put both into
	 * a single atomic_t and update it using cmpxchg.
	 */
	atomic_t		i_credits;

	/* Protocol version specific information */
	unsigned int		i_flowctl:1;	/* enable/disable flow ctl */

	/* Batched completions */
	unsigned int		i_unsignaled_wrs;

	/* Endpoint role in connection */
	bool			i_active_side;
	atomic_t		i_cq_quiesce;

	/* Send/Recv vectors */
	int			i_scq_vector;
	int			i_rcq_vector;
};
/* This assumes that atomic_t is at least 32 bits */
#define IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_GET_POST_CREDITS(v)	((v) >> 16)
#define IB_SET_SEND_CREDITS(v)	((v) & 0xffff)
#define IB_SET_POST_CREDITS(v)	((v) << 16)
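
/*
 * Illustrative sketch (not part of the original header; the real logic
 * is rds_ib_send_grab_credits() in ib_send.c): taking one send credit
 * from the packed i_credits word with a cmpxchg loop, using the
 * accessors above.  The function name is hypothetical.
 */
static inline int rds_ib_example_take_send_credit(atomic_t *credits)
{
	int oldval, newval;

	do {
		oldval = atomic_read(credits);
		if (IB_GET_SEND_CREDITS(oldval) == 0)
			return -EAGAIN;	/* no send credits; wait for remote */
		newval = oldval - 1;	/* send credits live in the low 16 bits */
	} while (atomic_cmpxchg(credits, oldval, newval) != oldval);

	return 0;
}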
struct rds_ib_ipaddr {
	struct list_head	list;
	__be32			ipaddr;
	struct rcu_head		rcu;
};

enum {
	RDS_IB_MR_8K_POOL = 0,
	RDS_IB_MR_1M_POOL,
};
struct rds_ib_device {
	struct list_head	list;
	struct list_head	ipaddr_list;
	struct list_head	conn_list;
	struct ib_device	*dev;
	struct ib_pd		*pd;
	bool			has_fmr;
	bool			has_fr;
	bool			use_fastreg;

	unsigned int		max_mrs;
	struct rds_ib_mr_pool	*mr_1m_pool;
	struct rds_ib_mr_pool	*mr_8k_pool;
	unsigned int		fmr_max_remaps;
	unsigned int		max_8k_mrs;
	unsigned int		max_1m_mrs;
	int			max_sge;
	unsigned int		max_wrs;
	unsigned int		max_initiator_depth;
	unsigned int		max_responder_resources;
	spinlock_t		spinlock;	/* protect the above */
	refcount_t		refcount;
	struct work_struct	free_work;
	int			*vector_load;
};
#define ibdev_to_node(ibdev) dev_to_node((ibdev)->dev.parent)
#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
/* bits for i_ack_flags */
#define IB_ACK_IN_FLIGHT	0
#define IB_ACK_REQUESTED	1

/* Magic WR_ID for ACKs */
#define RDS_IB_ACK_WR_ID	(~(u64) 0)
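
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): send completions for the ACK WR carry the magic wr_id above,
 * which is how a CQE handler can tell them apart from ring entries.
 */
static inline bool rds_ib_example_wc_is_ack(const struct ib_wc *wc)
{
	return wc->wr_id == RDS_IB_ACK_WR_ID;
}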
struct rds_ib_statistics {
	uint64_t	s_ib_connect_raced;
	uint64_t	s_ib_listen_closed_stale;
	uint64_t	s_ib_evt_handler_call;
	uint64_t	s_ib_tasklet_call;
	uint64_t	s_ib_tx_cq_event;
	uint64_t	s_ib_tx_ring_full;
	uint64_t	s_ib_tx_throttle;
	uint64_t	s_ib_tx_sg_mapping_failure;
	uint64_t	s_ib_tx_stalled;
	uint64_t	s_ib_tx_credit_updates;
	uint64_t	s_ib_rx_cq_event;
	uint64_t	s_ib_rx_ring_empty;
	uint64_t	s_ib_rx_refill_from_cq;
	uint64_t	s_ib_rx_refill_from_thread;
	uint64_t	s_ib_rx_alloc_limit;
	uint64_t	s_ib_rx_total_frags;
	uint64_t	s_ib_rx_total_incs;
	uint64_t	s_ib_rx_credit_updates;
	uint64_t	s_ib_ack_sent;
	uint64_t	s_ib_ack_send_failure;
	uint64_t	s_ib_ack_send_delayed;
	uint64_t	s_ib_ack_send_piggybacked;
	uint64_t	s_ib_ack_received;
	uint64_t	s_ib_rdma_mr_8k_alloc;
	uint64_t	s_ib_rdma_mr_8k_free;
	uint64_t	s_ib_rdma_mr_8k_used;
	uint64_t	s_ib_rdma_mr_8k_pool_flush;
	uint64_t	s_ib_rdma_mr_8k_pool_wait;
	uint64_t	s_ib_rdma_mr_8k_pool_depleted;
	uint64_t	s_ib_rdma_mr_1m_alloc;
	uint64_t	s_ib_rdma_mr_1m_free;
	uint64_t	s_ib_rdma_mr_1m_used;
	uint64_t	s_ib_rdma_mr_1m_pool_flush;
	uint64_t	s_ib_rdma_mr_1m_pool_wait;
	uint64_t	s_ib_rdma_mr_1m_pool_depleted;
	uint64_t	s_ib_rdma_mr_8k_reused;
	uint64_t	s_ib_rdma_mr_1m_reused;
	uint64_t	s_ib_atomic_cswp;
	uint64_t	s_ib_atomic_fadd;
	uint64_t	s_ib_recv_added_to_cache;
	uint64_t	s_ib_recv_removed_from_cache;
};
extern struct workqueue_struct *rds_ib_wq;
/*
 * Fake ib_dma_sync_sg_for_{cpu,device} as long as ib_verbs.h
 * doesn't define it.
 */
static inline void rds_ib_dma_sync_sg_for_cpu(struct ib_device *dev,
					      struct scatterlist *sglist,
					      unsigned int sg_dma_len,
					      int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_cpu(dev,
					   ib_sg_dma_address(dev, sg),
					   ib_sg_dma_len(dev, sg),
					   direction);
	}
}
#define ib_dma_sync_sg_for_cpu	rds_ib_dma_sync_sg_for_cpu

static inline void rds_ib_dma_sync_sg_for_device(struct ib_device *dev,
						 struct scatterlist *sglist,
						 unsigned int sg_dma_len,
						 int direction)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sglist, sg, sg_dma_len, i) {
		ib_dma_sync_single_for_device(dev,
					      ib_sg_dma_address(dev, sg),
					      ib_sg_dma_len(dev, sg),
					      direction);
	}
}
#define ib_dma_sync_sg_for_device	rds_ib_dma_sync_sg_for_device
/* ib.c */
extern struct rds_transport rds_ib_transport;
struct rds_ib_device *rds_ib_get_client_data(struct ib_device *device);
void rds_ib_dev_put(struct rds_ib_device *rds_ibdev);
extern struct ib_client rds_ib_client;

extern unsigned int rds_ib_retry_count;

extern spinlock_t ib_nodev_conns_lock;
extern struct list_head ib_nodev_conns;
/* ib_cm.c */
int rds_ib_conn_alloc(struct rds_connection *conn, gfp_t gfp);
void rds_ib_conn_free(void *arg);
int rds_ib_conn_path_connect(struct rds_conn_path *cp);
void rds_ib_conn_path_shutdown(struct rds_conn_path *cp);
void rds_ib_state_change(struct sock *sk);
int rds_ib_listen_init(void);
void rds_ib_listen_stop(void);
__printf(2, 3)
void __rds_ib_conn_error(struct rds_connection *conn, const char *, ...);
int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
			     struct rdma_cm_event *event);
int rds_ib_cm_initiate_connect(struct rdma_cm_id *cm_id);
void rds_ib_cm_connect_complete(struct rds_connection *conn,
				struct rdma_cm_event *event);

#define rds_ib_conn_error(conn, fmt...) \
	__rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
/* ib_rdma.c */
int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr);
void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
void rds_ib_destroy_nodev_conns(void);
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
/* ib_recv.c */
int rds_ib_recv_init(void);
void rds_ib_recv_exit(void);
int rds_ib_recv_path(struct rds_conn_path *conn);
int rds_ib_recv_alloc_caches(struct rds_ib_connection *ic, gfp_t gfp);
void rds_ib_recv_free_caches(struct rds_ib_connection *ic);
void rds_ib_recv_refill(struct rds_connection *conn, int prefill, gfp_t gfp);
void rds_ib_inc_free(struct rds_incoming *inc);
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
void rds_ib_recv_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc,
			     struct rds_ib_ack_state *state);
void rds_ib_recv_tasklet_fn(unsigned long data);
void rds_ib_recv_init_ring(struct rds_ib_connection *ic);
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic);
void rds_ib_recv_init_ack(struct rds_ib_connection *ic);
void rds_ib_attempt_ack(struct rds_ib_connection *ic);
void rds_ib_ack_send_complete(struct rds_ib_connection *ic);
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic);
void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq, int ack_required);
/* ib_ring.c */
void rds_ib_ring_init(struct rds_ib_work_ring *ring, u32 nr);
void rds_ib_ring_resize(struct rds_ib_work_ring *ring, u32 nr);
u32 rds_ib_ring_alloc(struct rds_ib_work_ring *ring, u32 val, u32 *pos);
void rds_ib_ring_free(struct rds_ib_work_ring *ring, u32 val);
void rds_ib_ring_unalloc(struct rds_ib_work_ring *ring, u32 val);
int rds_ib_ring_empty(struct rds_ib_work_ring *ring);
int rds_ib_ring_low(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_oldest(struct rds_ib_work_ring *ring);
u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest);
extern wait_queue_head_t rds_ib_ring_empty_wait;
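
/*
 * Illustrative usage sketch (hypothetical helper, not part of the
 * original header), assuming rds_ib_ring_alloc() returns the number of
 * slots it actually reserved: reserve ring entries before building WRs
 * and give them back with rds_ib_ring_unalloc() if the post never
 * happens.
 */
static inline int rds_ib_example_reserve(struct rds_ib_work_ring *ring,
					 u32 wanted, u32 *pos)
{
	u32 got = rds_ib_ring_alloc(ring, wanted, pos);

	if (got == wanted)
		return 0;
	if (got)
		rds_ib_ring_unalloc(ring, got);	/* partial grab: back out */
	return -ENOMEM;				/* ring full; caller backs off */
}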
/* ib_send.c */
void rds_ib_xmit_path_complete(struct rds_conn_path *cp);
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc);
void rds_ib_send_init_ring(struct rds_ib_connection *ic);
void rds_ib_send_clear_ring(struct rds_ib_connection *ic);
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op);
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits);
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted);
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted,
			     u32 *adv_credits, int need_posted, int max_posted);
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op);
/* ib_stats.c */
DECLARE_PER_CPU(struct rds_ib_statistics, rds_ib_stats);
#define rds_ib_stats_inc(member) rds_stats_inc_which(rds_ib_stats, member)
#define rds_ib_stats_add(member, count) \
		rds_stats_add_which(rds_ib_stats, member, count)
unsigned int rds_ib_stats_info_copy(struct rds_info_iterator *iter,
				    unsigned int avail);
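
/*
 * Example (illustrative): the member argument names a field of struct
 * rds_ib_statistics above, e.g. rds_ib_stats_inc(s_ib_tx_ring_full)
 * from the send path when the work ring has no free entries.
 */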
/* ib_sysctl.c */
int rds_ib_sysctl_init(void);
void rds_ib_sysctl_exit(void);
extern unsigned long rds_ib_sysctl_max_send_wr;
extern unsigned long rds_ib_sysctl_max_recv_wr;
extern unsigned long rds_ib_sysctl_max_unsig_wrs;
extern unsigned long rds_ib_sysctl_max_unsig_bytes;
extern unsigned long rds_ib_sysctl_max_recv_allocation;
extern unsigned int rds_ib_sysctl_flow_control;

#endif