#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <rdma/rdma_cm.h>
#include <linux/mutex.h>
/*
 * RDS Network protocol version
 */
#define RDS_PROTOCOL_3_0	0x0300
#define RDS_PROTOCOL_3_1	0x0301
#define RDS_PROTOCOL_VERSION	RDS_PROTOCOL_3_1
#define RDS_PROTOCOL_MAJOR(v)	((v) >> 8)
#define RDS_PROTOCOL_MINOR(v)	((v) & 255)
#define RDS_PROTOCOL(maj, min)	(((maj) << 8) | (min))
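/*
 * Worked example (illustrative values): RDS_PROTOCOL(3, 1) packs to 0x0301,
 * and RDS_PROTOCOL_MAJOR(0x0301) == 3, RDS_PROTOCOL_MINOR(0x0301) == 1,
 * so a single 16-bit value carries both halves of the negotiated version.
 */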
/*
 * XXX randomly chosen, but at least seems to be unused:
 * # 18464-18768 Unassigned
 * We should do better.  We want a reserved port to discourage unpriv'ed
 * userspace from listening.
 */
#define RDS_PORT	18634
#define KERNEL_HAS_ATOMIC64
#ifdef DEBUG
#define rdsdebug(fmt, args...) pr_debug("%s(): " fmt, __func__ , ##args)
#else
/* sigh, pr_debug() causes unused variable warnings */
static inline void __attribute__ ((format (printf, 1, 2)))
rdsdebug(char *fmt, ...)
{
}
#endif
/* XXX is there one of these somewhere? */
#define ceil(x, y) \
	({ unsigned long __x = (x), __y = (y); (__x + __y - 1) / __y; })
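/*
 * Worked example: ceil(8192, 4096) evaluates to 2, while ceil(8193, 4096)
 * rounds up to 3.  The statement-expression form means both arguments are
 * evaluated exactly once, so side effects are safe.
 */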
#define RDS_FRAG_SHIFT	12
#define RDS_FRAG_SIZE	((unsigned int)(1 << RDS_FRAG_SHIFT))
#define RDS_CONG_MAP_BYTES	(65536 / 8)
#define RDS_CONG_MAP_LONGS	(RDS_CONG_MAP_BYTES / sizeof(unsigned long))
#define RDS_CONG_MAP_PAGES	(PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
#define RDS_CONG_MAP_PAGE_BITS	(PAGE_SIZE * 8)
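/*
 * Sizing sketch: one bit per 16-bit port gives 65536/8 = 8192 bytes, i.e.
 * two pages with 4K pages.  A hypothetical lookup of the bit for a port
 * might look like the following (the helper names and exact indexing here
 * are illustrative assumptions, not taken from this file):
 *
 *	i   = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;  // which page
 *	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;  // bit within page
 *	set_bit_le(off, (void *)map->m_page_addrs[i]);
 */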
struct rds_cong_map {
	struct rb_node		m_rb_node;
	wait_queue_head_t	m_waitq;
	struct list_head	m_conn_list;
	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
};
/*
 * This is how we will track the connection state:
 * A connection is always in one of the following
 * states. Updates to the state are atomic and imply
 * a memory barrier.
 */
enum {
	RDS_CONN_DOWN = 0,
	RDS_CONN_CONNECTING,
	RDS_CONN_DISCONNECTING,
	RDS_CONN_UP,
	RDS_CONN_ERROR,
};
/* Bits for c_flags */
#define RDS_LL_SEND_FULL	0
#define RDS_RECONNECT_PENDING	1
struct rds_connection {
	struct hlist_node	c_hash_node;
	unsigned int		c_loopback:1;
	struct rds_connection	*c_passive;

	struct rds_cong_map	*c_lcong;
	struct rds_cong_map	*c_fcong;

	struct rds_message	*c_xmit_rm;
	unsigned long		c_xmit_sg;
	unsigned int		c_xmit_hdr_off;
	unsigned int		c_xmit_data_off;
	unsigned int		c_xmit_atomic_sent;
	unsigned int		c_xmit_rdma_sent;
	unsigned int		c_xmit_data_sent;

	spinlock_t		c_lock;		/* protect msg queues */
	struct list_head	c_send_queue;
	struct list_head	c_retrans;

	struct rds_transport	*c_trans;
	void			*c_transport_data;

	atomic_t		c_state;
	unsigned long		c_flags;
	unsigned long		c_reconnect_jiffies;
	struct delayed_work	c_send_w;
	struct delayed_work	c_recv_w;
	struct delayed_work	c_conn_w;
	struct work_struct	c_down_w;
	struct mutex		c_cm_lock;	/* protect conn state & cm */
	wait_queue_head_t	c_waitq;

	struct list_head	c_map_item;
	unsigned long		c_map_queued;

	unsigned int		c_unacked_packets;
	unsigned int		c_unacked_bytes;

	/* Protocol version */
	unsigned int		c_version;
};
#define RDS_FLAG_CONG_BITMAP	0x01
#define RDS_FLAG_ACK_REQUIRED	0x02
#define RDS_FLAG_RETRANSMITTED	0x04
#define RDS_MAX_ADV_CREDIT	255
/*
 * Maximum space available for extension headers.
 */
#define RDS_HEADER_EXT_SPACE	16

struct rds_header {
	__be64	h_sequence;
	__be64	h_ack;
	__be32	h_len;
	__be16	h_sport;
	__be16	h_dport;
	u8	h_flags;
	u8	h_credit;
	u8	h_padding[4];
	__sum16	h_csum;

	u8	h_exthdr[RDS_HEADER_EXT_SPACE];
};
/*
 * Reserved - indicates end of extensions
 */
#define RDS_EXTHDR_NONE		0
/*
 * This extension header is included in the very
 * first message that is sent on a new connection,
 * and identifies the protocol level. This will help
 * rolling updates if a future change requires breaking
 * the protocol.
 * NB: This is no longer true for IB, where we do a version
 * negotiation during the connection setup phase (protocol
 * version information is included in the RDMA CM private data).
 */
#define RDS_EXTHDR_VERSION	1
struct rds_ext_header_version {
	__be32			h_version;
};
/*
 * This extension header is included in the RDS message
 * chasing an RDMA operation.
 */
#define RDS_EXTHDR_RDMA		2
struct rds_ext_header_rdma {
	__be32			h_rdma_rkey;
};
/*
 * This extension header tells the peer about the
 * destination <R_Key,offset> of the requested RDMA
 * operation.
 */
#define RDS_EXTHDR_RDMA_DEST	3
struct rds_ext_header_rdma_dest {
	__be32			h_rdma_rkey;
	__be32			h_rdma_offset;
};

#define __RDS_EXTHDR_MAX	16 /* for now */
struct rds_incoming {
	atomic_t		i_refcount;
	struct list_head	i_item;
	struct rds_connection	*i_conn;
	struct rds_header	i_hdr;
	unsigned long		i_rx_jiffies;
	__be32			i_saddr;

	rds_rdma_cookie_t	i_rdma_cookie;
};
struct rds_mr {
	struct rb_node		r_rb_node;
	atomic_t		r_refcount;
	u32			r_key;

	/* A copy of the creation flags */
	unsigned int		r_use_once:1;
	unsigned int		r_invalidate:1;
	unsigned int		r_write:1;

	/* This is for RDS_MR_DEAD.
	 * It would be nice & consistent to make this part of the above
	 * bit field here, but we need to use test_and_set_bit.
	 */
	unsigned long		r_state;
	struct rds_sock		*r_sock; /* back pointer to the socket that owns us */
	struct rds_transport	*r_trans;
	void			*r_trans_private;
};

/* Flags for mr->r_state */
#define RDS_MR_DEAD		0
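/*
 * Illustrative use of the r_state bit (a sketch, not code from this file):
 * marking an MR dead must be atomic with respect to concurrent teardown
 * paths, which is why r_state is a plain unsigned long usable with
 * test_and_set_bit() rather than a :1 bitfield:
 *
 *	if (test_and_set_bit(RDS_MR_DEAD, &mr->r_state))
 *		return;		// someone else already killed it
 */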
static inline rds_rdma_cookie_t rds_rdma_make_cookie(u32 r_key, u32 offset)
{
	return r_key | (((u64) offset) << 32);
}

static inline u32 rds_rdma_cookie_key(rds_rdma_cookie_t cookie)
{
	return cookie;
}

static inline u32 rds_rdma_cookie_offset(rds_rdma_cookie_t cookie)
{
	return cookie >> 32;
}
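/*
 * Round-trip sketch: with r_key 0x1234 and offset 0x10, the cookie is
 * 0x0000001000001234ULL; rds_rdma_cookie_key() recovers 0x1234 (the low 32
 * bits, truncated by the u32 return type) and rds_rdma_cookie_offset()
 * recovers 0x10 (the high 32 bits).
 */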
/* atomic operation types */
#define RDS_ATOMIC_TYPE_CSWP	0
#define RDS_ATOMIC_TYPE_FADD	1
/*
 * m_sock_item and m_conn_item are on lists that are serialized under
 * conn->c_lock.  m_sock_item has additional meaning in that once it is empty
 * the message will not be put back on the retransmit list after being sent.
 * Messages that are canceled while being sent rely on this.
 *
 * m_inc is used by loopback so that it can pass an incoming message straight
 * back up into the rx path.  It embeds a wire header which is also used by
 * the send path, which is kind of awkward.
 *
 * m_sock_item indicates the message's presence on a socket's send or receive
 * queue.  m_rs will point to that socket.
 *
 * m_daddr is used by cancellation to prune messages to a given destination.
 *
 * The RDS_MSG_ON_SOCK and RDS_MSG_ON_CONN flags are used to avoid lock
 * nesting.  As paths iterate over messages on a sock, or conn, they must
 * also lock the conn, or sock, to remove the message from those lists too.
 * Testing the flag to determine if the message is still on the lists lets
 * us avoid testing the list_head directly.  That means each path can use
 * the message's list_head to keep it on a local list while juggling locks
 * without confusing the other path.
 *
 * m_ack_seq is an optional field set by transports who need a different
 * sequence number range to invalidate.  They can use this in a callback
 * that they pass to rds_send_drop_acked() to see if each message has been
 * acked.  The HAS_ACK_SEQ flag can be used to detect messages which haven't
 * had ack_seq set yet.
 */
#define RDS_MSG_ON_SOCK		1
#define RDS_MSG_ON_CONN		2
#define RDS_MSG_HAS_ACK_SEQ	3
#define RDS_MSG_ACK_REQUIRED	4
#define RDS_MSG_RETRANSMITTED	5
#define RDS_MSG_MAPPED		6
#define RDS_MSG_PAGEVEC		7
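/*
 * These are bit numbers used with the atomic bitops on m_flags, e.g.
 * (an illustrative sketch, not code from this file):
 *
 *	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
 *	if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
 *		return;		// already pulled off the socket list
 */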
struct rds_message {
	atomic_t		m_refcount;
	struct list_head	m_sock_item;
	struct list_head	m_conn_item;
	struct rds_incoming	m_inc;
	u64			m_ack_seq;
	__be32			m_daddr;
	unsigned long		m_flags;

	/* Never access m_rs without holding m_rs_lock. */
	spinlock_t		m_rs_lock;
	wait_queue_head_t	m_flush_wait;

	struct rds_sock		*m_rs;

	/* cookie to send to remote, in rds header */
	rds_rdma_cookie_t	m_rdma_cookie;

	unsigned int		m_used_sgs;
	unsigned int		m_total_sgs;
	struct rm_atomic_op {
		uint64_t		compare_mask;
		uint64_t		nocarry_mask;

		unsigned int		op_notify:1;
		unsigned int		op_recverr:1;
		unsigned int		op_mapped:1;
		unsigned int		op_silent:1;
		unsigned int		op_active:1;
		struct scatterlist	*op_sg;
		struct rds_notifier	*op_notifier;

		struct rds_mr		*op_rdma_mr;
	} atomic;
	struct rm_rdma_op {
		unsigned int		op_write:1;
		unsigned int		op_fence:1;
		unsigned int		op_notify:1;
		unsigned int		op_recverr:1;
		unsigned int		op_mapped:1;
		unsigned int		op_silent:1;
		unsigned int		op_active:1;
		unsigned int		op_bytes;
		unsigned int		op_nents;
		unsigned int		op_count;
		struct scatterlist	*op_sg;
		struct rds_notifier	*op_notifier;

		struct rds_mr		*op_rdma_mr;
	} rdma;
	struct rm_data_op {
		unsigned int		op_active:1;
		unsigned int		op_nents;
		unsigned int		op_count;
		struct scatterlist	*op_sg;
	} data;
};
/*
 * The RDS notifier is used (optionally) to tell the application about
 * completed RDMA operations.  Rather than keeping the whole rds message
 * around on the queue, we allocate a small notifier that is put on the
 * socket's notifier_list.  Notifications are delivered to the application
 * through control messages.
 */
struct rds_notifier {
	struct list_head	n_list;
	uint64_t		n_user_token;
	int			n_status;
};
/**
 * struct rds_transport - transport specific behavioural hooks
 *
 * @xmit: .xmit is called by rds_send_xmit() to tell the transport to send
 *        part of a message.  The caller serializes on the send_sem so this
 *        doesn't need to be reentrant for a given conn.  The header must be
 *        sent before the data payload.  .xmit must be prepared to send a
 *        message with no data payload.  .xmit should return the number of
 *        bytes that were sent down the connection, including header bytes.
 *        Returning 0 tells the caller that it doesn't need to perform any
 *        additional work now.  This is usually the case when the transport has
 *        filled the sending queue for its connection and will handle
 *        triggering the rds thread to continue the send when space becomes
 *        available.  Returning -EAGAIN tells the caller to retry the send
 *        immediately.  Returning -ENOMEM tells the caller to retry the send at
 *        some point in the future.
 *
 * @conn_shutdown: conn_shutdown stops traffic on the given connection.  Once
 *                 it returns the connection can not call rds_recv_incoming().
 *                 This will only be called once after conn_connect returns
 *                 non-zero success.  The caller serializes this with
 *                 the send and connecting paths (xmit_* and conn_*).  The
 *                 transport is responsible for other serialization, including
 *                 rds_recv_incoming().  This is called in process context but
 *                 should try hard not to block.
 */
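/*
 * Sketch of the .xmit contract from the caller's side (illustrative only;
 * the real loop lives in rds_send_xmit() and differs in detail):
 *
 *	ret = conn->c_trans->xmit(conn, rm, hdr_off, sg, off);
 *	if (ret == 0)
 *		break;			// transport will reschedule us
 *	else if (ret == -EAGAIN)
 *		continue;		// retry the send immediately
 *	else if (ret == -ENOMEM)
 *		// back off and retry later, e.g. via the send workqueue
 *	else if (ret > 0)
 *		// advance hdr_off/sg/off by the ret bytes consumed
 */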
#define RDS_TRANS_IB	0
#define RDS_TRANS_IWARP	1
#define RDS_TRANS_TCP	2
#define RDS_TRANS_COUNT	3
struct rds_transport {
	char			t_name[TRANSNAMSIZ];
	struct list_head	t_item;
	struct module		*t_owner;
	unsigned int		t_prefer_loopback:1;

	int (*laddr_check)(__be32 addr);
	int (*conn_alloc)(struct rds_connection *conn, gfp_t gfp);
	void (*conn_free)(void *data);
	int (*conn_connect)(struct rds_connection *conn);
	void (*conn_shutdown)(struct rds_connection *conn);
	void (*xmit_prepare)(struct rds_connection *conn);
	void (*xmit_complete)(struct rds_connection *conn);
	int (*xmit)(struct rds_connection *conn, struct rds_message *rm,
		    unsigned int hdr_off, unsigned int sg, unsigned int off);
	int (*xmit_rdma)(struct rds_connection *conn, struct rm_rdma_op *op);
	int (*xmit_atomic)(struct rds_connection *conn, struct rm_atomic_op *op);
	int (*recv)(struct rds_connection *conn);
	int (*inc_copy_to_user)(struct rds_incoming *inc, struct iovec *iov,
				size_t size);
	void (*inc_free)(struct rds_incoming *inc);

	int (*cm_handle_connect)(struct rdma_cm_id *cm_id,
				 struct rdma_cm_event *event);
	int (*cm_initiate_connect)(struct rdma_cm_id *cm_id);
	void (*cm_connect_complete)(struct rds_connection *conn,
				    struct rdma_cm_event *event);

	unsigned int (*stats_info_copy)(struct rds_info_iterator *iter,
					unsigned int avail);

	void *(*get_mr)(struct scatterlist *sg, unsigned long nr_sg,
			struct rds_sock *rs, u32 *key_ret);
	void (*sync_mr)(void *trans_private, int direction);
	void (*free_mr)(void *trans_private, int invalidate);
	void (*flush_mrs)(void);
};
struct rds_sock {
	struct sock		rs_sk;

	/*
	 * bound_addr used for both incoming and outgoing, no INADDR_ANY
	 * support.
	 */
	struct hlist_node	rs_bound_node;
	__be32			rs_bound_addr;
	__be16			rs_bound_port;

	struct rds_transport	*rs_transport;

	/*
	 * rds_sendmsg caches the conn it used the last time around.
	 * This helps avoid costly lookups.
	 */
	struct rds_connection	*rs_conn;

	/* flag indicating we were congested or not */
	int			rs_congested;
	/* seen congestion (ENOBUFS) when sending? */
	int			rs_seen_congestion;

	/* rs_lock protects all these adjacent members before the newline */
	spinlock_t		rs_lock;
	struct list_head	rs_send_queue;
	struct list_head	rs_notify_queue;	/* currently used for failed RDMAs */

	/* Congestion wake_up. If rs_cong_monitor is set, we use cong_mask
	 * to decide whether the application should be woken up.
	 * If not set, we use rs_cong_track to find out whether a cong map
	 * update arrived.
	 */
	uint64_t		rs_cong_mask;
	uint64_t		rs_cong_notify;
	struct list_head	rs_cong_list;
	unsigned long		rs_cong_track;

	/*
	 * rs_recv_lock protects the receive queue, and is
	 * used to serialize with rds_release.
	 */
	rwlock_t		rs_recv_lock;
	struct list_head	rs_recv_queue;

	/* just for stats reporting */
	struct list_head	rs_item;

	/* these have their own lock */
	spinlock_t		rs_rdma_lock;
	struct rb_root		rs_rdma_keys;

	/* Socket options - in case there will be more */
	unsigned char		rs_recverr,
				rs_cong_monitor;
};
static inline struct rds_sock *rds_sk_to_rs(const struct sock *sk)
{
	return container_of(sk, struct rds_sock, rs_sk);
}
static inline struct sock *rds_rs_to_sk(struct rds_sock *rs)
{
	return &rs->rs_sk;
}
/*
 * The stack assigns sk_sndbuf and sk_rcvbuf to twice the specified value
 * to account for overhead.  We don't account for overhead, we just apply
 * the number of payload bytes to the specified value.
 */
static inline int rds_sk_sndbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_sndbuf / 2;
}
static inline int rds_sk_rcvbuf(struct rds_sock *rs)
{
	return rds_rs_to_sk(rs)->sk_rcvbuf / 2;
}
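/*
 * Worked example: if userspace sets SO_SNDBUF to 128KB, the core stack
 * stores sk_sndbuf = 256KB (doubled to account for bookkeeping overhead);
 * the helpers above halve it again so RDS limits actual payload to the
 * 128KB the application asked for.
 */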
struct rds_statistics {
	uint64_t	s_conn_reset;
	uint64_t	s_recv_drop_bad_checksum;
	uint64_t	s_recv_drop_old_seq;
	uint64_t	s_recv_drop_no_sock;
	uint64_t	s_recv_drop_dead_sock;
	uint64_t	s_recv_deliver_raced;
	uint64_t	s_recv_delivered;
	uint64_t	s_recv_queued;
	uint64_t	s_recv_immediate_retry;
	uint64_t	s_recv_delayed_retry;
	uint64_t	s_recv_ack_required;
	uint64_t	s_recv_rdma_bytes;
	uint64_t	s_recv_ping;
	uint64_t	s_send_queue_empty;
	uint64_t	s_send_queue_full;
	uint64_t	s_send_lock_contention;
	uint64_t	s_send_lock_queue_raced;
	uint64_t	s_send_immediate_retry;
	uint64_t	s_send_delayed_retry;
	uint64_t	s_send_drop_acked;
	uint64_t	s_send_ack_required;
	uint64_t	s_send_queued;
	uint64_t	s_send_rdma;
	uint64_t	s_send_rdma_bytes;
	uint64_t	s_send_pong;
	uint64_t	s_page_remainder_hit;
	uint64_t	s_page_remainder_miss;
	uint64_t	s_copy_to_user;
	uint64_t	s_copy_from_user;
	uint64_t	s_cong_update_queued;
	uint64_t	s_cong_update_received;
	uint64_t	s_cong_send_error;
	uint64_t	s_cong_send_blocked;
};
char *rds_str_array(char **array, size_t elements, size_t index);
void rds_sock_addref(struct rds_sock *rs);
void rds_sock_put(struct rds_sock *rs);
void rds_wake_sk_sleep(struct rds_sock *rs);
static inline void __rds_wake_sk_sleep(struct sock *sk)
{
	wait_queue_head_t *waitq = sk_sleep(sk);

	if (!sock_flag(sk, SOCK_DEAD) && waitq)
		wake_up(waitq);
}
extern wait_queue_head_t rds_poll_waitq;
int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len);
void rds_remove_bound(struct rds_sock *rs);
struct rds_sock *rds_find_bound(__be32 addr, __be16 port);
int rds_cong_get_maps(struct rds_connection *conn);
void rds_cong_add_conn(struct rds_connection *conn);
void rds_cong_remove_conn(struct rds_connection *conn);
void rds_cong_set_bit(struct rds_cong_map *map, __be16 port);
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port);
int rds_cong_wait(struct rds_cong_map *map, __be16 port, int nonblock,
		  struct rds_sock *rs);
void rds_cong_queue_updates(struct rds_cong_map *map);
void rds_cong_map_updated(struct rds_cong_map *map, uint64_t);
int rds_cong_updated_since(unsigned long *recent);
void rds_cong_add_socket(struct rds_sock *);
void rds_cong_remove_socket(struct rds_sock *);
void rds_cong_exit(void);
struct rds_message *rds_cong_update_alloc(struct rds_connection *conn);
int rds_conn_init(void);
void rds_conn_exit(void);
struct rds_connection *rds_conn_create(__be32 laddr, __be32 faddr,
				       struct rds_transport *trans, gfp_t gfp);
struct rds_connection *rds_conn_create_outgoing(__be32 laddr, __be32 faddr,
						struct rds_transport *trans,
						gfp_t gfp);
void rds_conn_shutdown(struct rds_connection *conn);
void rds_conn_destroy(struct rds_connection *conn);
void rds_conn_drop(struct rds_connection *conn);
void rds_conn_connect_if_down(struct rds_connection *conn);
void rds_for_each_conn_info(struct socket *sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens,
			    int (*visitor)(struct rds_connection *, void *),
			    size_t item_len);
void __rds_conn_error(struct rds_connection *conn, const char *, ...)
			__attribute__ ((format (printf, 2, 3)));
#define rds_conn_error(conn, fmt...) \
	__rds_conn_error(conn, KERN_WARNING "RDS: " fmt)
static inline int
rds_conn_transition(struct rds_connection *conn, int old, int new)
{
	return atomic_cmpxchg(&conn->c_state, old, new) == old;
}
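/*
 * Typical (illustrative) use: a connect path claims the connection by
 * atomically moving it DOWN -> CONNECTING, so only one contender wins:
 *
 *	if (rds_conn_transition(conn, RDS_CONN_DOWN, RDS_CONN_CONNECTING))
 *		// we own the connect attempt
 */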
static inline int
rds_conn_state(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state);
}

static inline int
rds_conn_up(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_UP;
}

static inline int
rds_conn_connecting(struct rds_connection *conn)
{
	return atomic_read(&conn->c_state) == RDS_CONN_CONNECTING;
}
struct rds_message *rds_message_alloc(unsigned int nents, gfp_t gfp);
struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents);
int rds_message_copy_from_user(struct rds_message *rm, struct iovec *first_iov,
			       size_t total_len);
struct rds_message *rds_message_map_pages(unsigned long *page_addrs,
					  unsigned int total_len);
void rds_message_populate_header(struct rds_header *hdr, __be16 sport,
				 __be16 dport, u64 seq);
int rds_message_add_extension(struct rds_header *hdr,
			      unsigned int type, const void *data,
			      unsigned int len);
int rds_message_next_extension(struct rds_header *hdr,
			       unsigned int *pos, void *buf,
			       unsigned int *buflen);
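/*
 * Illustrative iteration over a header's extension area (a sketch; the
 * buffer sizing and dispatch are assumptions, not taken from this file):
 *
 *	unsigned int pos = 0, len;
 *	int type;
 *	union {
 *		struct rds_ext_header_version version;
 *		struct rds_ext_header_rdma_dest rdma_dest;
 *	} buf;
 *
 *	for (;;) {
 *		len = sizeof(buf);
 *		type = rds_message_next_extension(hdr, &pos, &buf, &len);
 *		if (type == RDS_EXTHDR_NONE)
 *			break;
 *		// dispatch on type
 *	}
 */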
int rds_message_add_rdma_dest_extension(struct rds_header *hdr, u32 r_key,
					u32 offset);
int rds_message_inc_copy_to_user(struct rds_incoming *inc,
				 struct iovec *first_iov, size_t size);
void rds_message_inc_free(struct rds_incoming *inc);
void rds_message_addref(struct rds_message *rm);
void rds_message_put(struct rds_message *rm);
void rds_message_wait(struct rds_message *rm);
void rds_message_unmapped(struct rds_message *rm);
static inline void rds_message_make_checksum(struct rds_header *hdr)
{
	hdr->h_csum = 0;
	hdr->h_csum = ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2);
}
static inline int rds_message_verify_checksum(const struct rds_header *hdr)
{
	return !hdr->h_csum || ip_fast_csum((void *) hdr, sizeof(*hdr) >> 2) == 0;
}
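/*
 * The zeroing in rds_message_make_checksum() matters: the checksum is
 * computed over the whole header, h_csum field included, so the field must
 * be cleared before summing.  Verification then folds the header with
 * h_csum in place and expects the ones-complement total to come out to
 * zero; an all-zero h_csum is treated as "checksum not in use".
 */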
int rds_page_remainder_alloc(struct scatterlist *scat, unsigned long bytes,
			     gfp_t gfp);
int rds_page_copy_user(struct page *page, unsigned long offset,
		       void __user *ptr, unsigned long bytes,
		       int to_user);
#define rds_page_copy_to_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 1)
#define rds_page_copy_from_user(page, offset, ptr, bytes) \
	rds_page_copy_user(page, offset, ptr, bytes, 0)
void rds_page_exit(void);
void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
		  __be32 saddr);
void rds_inc_put(struct rds_incoming *inc);
void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
		       struct rds_incoming *inc, gfp_t gfp, enum km_type km);
int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size, int msg_flags);
void rds_clear_recv_queue(struct rds_sock *rs);
int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msg);
void rds_inc_info_copy(struct rds_incoming *inc,
		       struct rds_info_iterator *iter,
		       __be32 saddr, __be32 daddr, int flip);
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len);
void rds_send_reset(struct rds_connection *conn);
int rds_send_xmit(struct rds_connection *conn);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest);
typedef int (*is_acked_func)(struct rds_message *rm, uint64_t ack);
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked);
int rds_send_pong(struct rds_connection *conn, __be16 dport);
struct rds_message *rds_send_get_message(struct rds_connection *,
					 struct rm_rdma_op *);
void rds_rdma_unuse(struct rds_sock *rs, u32 r_key, int force);
int rds_get_mr(struct rds_sock *rs, char __user *optval, int optlen);
int rds_get_mr_for_dest(struct rds_sock *rs, char __user *optval, int optlen);
int rds_free_mr(struct rds_sock *rs, char __user *optval, int optlen);
void rds_rdma_drop_keys(struct rds_sock *rs);
int rds_rdma_extra_size(struct rds_rdma_args *args);
int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_dest(struct rds_sock *rs, struct rds_message *rm,
		       struct cmsghdr *cmsg);
int rds_cmsg_rdma_map(struct rds_sock *rs, struct rds_message *rm,
		      struct cmsghdr *cmsg);
void rds_rdma_free_op(struct rm_rdma_op *ro);
void rds_atomic_free_op(struct rm_atomic_op *ao);
void rds_rdma_send_complete(struct rds_message *rm, int wc_status);
void rds_atomic_send_complete(struct rds_message *rm, int wc_status);
int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
		    struct cmsghdr *cmsg);
extern void __rds_put_mr_final(struct rds_mr *mr);
static inline void rds_mr_put(struct rds_mr *mr)
{
	if (atomic_dec_and_test(&mr->r_refcount))
		__rds_put_mr_final(mr);
}
DECLARE_PER_CPU_SHARED_ALIGNED(struct rds_statistics, rds_stats);
#define rds_stats_inc_which(which, member) do {		\
	per_cpu(which, get_cpu()).member++;		\
	put_cpu();					\
} while (0)
#define rds_stats_inc(member) rds_stats_inc_which(rds_stats, member)
#define rds_stats_add_which(which, member, count) do {		\
	per_cpu(which, get_cpu()).member += count;		\
	put_cpu();						\
} while (0)
#define rds_stats_add(member, count) rds_stats_add_which(rds_stats, member, count)
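/*
 * Example (illustrative): bumping counters from the send path.  The
 * get_cpu()/put_cpu() pair disables preemption so the read-modify-write
 * of the per-CPU counter is not split across a CPU migration:
 *
 *	rds_stats_inc(s_send_queued);
 *	rds_stats_add(s_send_rdma_bytes, op->op_bytes);
 */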
int rds_stats_init(void);
void rds_stats_exit(void);
void rds_stats_info_copy(struct rds_info_iterator *iter,
			 uint64_t *values, const char *const *names,
			 size_t nr);
int rds_sysctl_init(void);
void rds_sysctl_exit(void);
extern unsigned long rds_sysctl_sndbuf_min;
extern unsigned long rds_sysctl_sndbuf_default;
extern unsigned long rds_sysctl_sndbuf_max;
extern unsigned long rds_sysctl_reconnect_min_jiffies;
extern unsigned long rds_sysctl_reconnect_max_jiffies;
extern unsigned int  rds_sysctl_max_unacked_packets;
extern unsigned int  rds_sysctl_max_unacked_bytes;
extern unsigned int  rds_sysctl_ping_enable;
extern unsigned long rds_sysctl_trace_flags;
extern unsigned int  rds_sysctl_trace_level;
int rds_threads_init(void);
void rds_threads_exit(void);
extern struct workqueue_struct *rds_wq;
void rds_queue_reconnect(struct rds_connection *conn);
void rds_connect_worker(struct work_struct *);
void rds_shutdown_worker(struct work_struct *);
void rds_send_worker(struct work_struct *);
void rds_recv_worker(struct work_struct *);
void rds_connect_complete(struct rds_connection *conn);
int rds_trans_register(struct rds_transport *trans);
void rds_trans_unregister(struct rds_transport *trans);
struct rds_transport *rds_trans_get_preferred(__be32 addr);
void rds_trans_put(struct rds_transport *trans);
unsigned int rds_trans_stats_info_copy(struct rds_info_iterator *iter,
				       unsigned int avail);
int rds_trans_init(void);
void rds_trans_exit(void);