/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */
14 #include <linux/kernel.h> /* max_t */
15 #include <linux/atomic.h>
17 #include <linux/compiler.h>
23 #define SMC_CDC_MSG_TYPE 0xFE
/*
 * Cursor overlay: one 8-byte value so the whole cursor can be read and
 * written atomically (see the atomic64_*() helpers below).
 * NOTE(review): this span is garbled by extraction -- the union's
 * field members, the #else/#endif lines and the closing brace are
 * missing from this view; do not treat the fragment as complete.
 */
25 /* in network byte order */
26 union smc_cdc_cursor
{ /* SMC cursor */
32 #ifdef KERNEL_HAS_ATOMIC64
/* 8-byte overlay used with atomic64_read()/atomic64_set() */
33 atomic64_t acurs
; /* for atomic processing */
/* presumably the fallback branch when 64-bit atomics are unavailable
 * (the guarding #else is missing from this view) -- TODO confirm
 */
35 u64 acurs
; /* for atomic processing */
/*
 * Wire format of a CDC message.
 * NOTE(review): garbled span -- the "struct smc_cdc_msg {" opening
 * line and some fixed-size members (len/seqno/token, which the
 * conversion helpers below read and write) are missing from this view.
 */
39 /* in network byte order */
41 struct smc_wr_rx_hdr common
; /* .type = 0xFE */
/* producer cursor, network byte order */
45 union smc_cdc_cursor prod
;
46 union smc_cdc_cursor cons
; /* piggy backed "ack" */
47 struct smc_cdc_producer_flags prod_flags
;
48 struct smc_cdc_conn_state_flags conn_state_flags
;
52 static inline bool smc_cdc_rxed_any_close(struct smc_connection
*conn
)
54 return conn
->local_rx_ctrl
.conn_state_flags
.peer_conn_abort
||
55 conn
->local_rx_ctrl
.conn_state_flags
.peer_conn_closed
;
58 static inline bool smc_cdc_rxed_any_close_or_senddone(
59 struct smc_connection
*conn
)
61 return smc_cdc_rxed_any_close(conn
) ||
62 conn
->local_rx_ctrl
.conn_state_flags
.peer_done_writing
;
/*
 * smc_curs_add() - advance a host cursor within a ring of @size bytes.
 * NOTE(review): garbled span -- the third parameter and the body
 * statements (including whatever the wrap branch below does) are
 * missing from this view; only the signature fragment and the
 * wrap-check condition survive.
 */
65 static inline void smc_curs_add(int size
, union smc_host_cursor
*curs
,
/* when the byte count reaches the ring size, the cursor must wrap */
69 if (curs
->count
>= size
) {
75 /* SMC cursors are 8 bytes long and require atomic reading and writing */
/*
 * smc_curs_read() - fetch a host cursor atomically as a u64.
 * Without KERNEL_HAS_ATOMIC64 the read is serialized with the
 * connection's acurs_lock; otherwise atomic64_read() on curs->acurs
 * is used.
 * NOTE(review): garbled span -- the local declarations, the locked
 * read itself, the #else/#endif lines and the braces are missing from
 * this view.
 */
76 static inline u64
smc_curs_read(union smc_host_cursor
*curs
,
77 struct smc_connection
*conn
)
79 #ifndef KERNEL_HAS_ATOMIC64
83 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
85 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
88 return atomic64_read(&curs
->acurs
);
/*
 * smc_curs_read_net() - fetch a network-order (CDC) cursor atomically
 * as a u64; same locking scheme as smc_curs_read().
 * NOTE(review): garbled span -- locals, the locked read, #else/#endif
 * and braces are missing from this view.
 */
92 static inline u64
smc_curs_read_net(union smc_cdc_cursor
*curs
,
93 struct smc_connection
*conn
)
95 #ifndef KERNEL_HAS_ATOMIC64
99 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
101 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
104 return atomic64_read(&curs
->acurs
);
/*
 * smc_curs_write() - store @val into a host cursor atomically.
 * Without KERNEL_HAS_ATOMIC64 the store is serialized with the
 * connection's acurs_lock; otherwise atomic64_set() on curs->acurs
 * is used.
 * NOTE(review): garbled span -- the locked store, the #else/#endif
 * lines and the braces are missing from this view.
 */
108 static inline void smc_curs_write(union smc_host_cursor
*curs
, u64 val
,
109 struct smc_connection
*conn
)
111 #ifndef KERNEL_HAS_ATOMIC64
114 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
116 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
118 atomic64_set(&curs
->acurs
, val
);
/*
 * smc_curs_write_net() - store @val into a network-order (CDC) cursor
 * atomically; same locking scheme as smc_curs_write().
 * NOTE(review): garbled span -- the locked store, the #else/#endif
 * lines and the braces are missing from this view.
 */
122 static inline void smc_curs_write_net(union smc_cdc_cursor
*curs
, u64 val
,
123 struct smc_connection
*conn
)
125 #ifndef KERNEL_HAS_ATOMIC64
128 spin_lock_irqsave(&conn
->acurs_lock
, flags
);
130 spin_unlock_irqrestore(&conn
->acurs_lock
, flags
);
132 atomic64_set(&curs
->acurs
, val
);
136 /* calculate cursor difference between old and new, where old <= new */
/*
 * Returns the byte distance between the cursors, clamped to >= 0 via
 * max_t(); when the wrap counters differ the distance crosses the
 * ring boundary of @size bytes.
 * NOTE(review): garbled span -- the opening of the first return
 * statement and the braces are missing from this view.
 */
137 static inline int smc_curs_diff(unsigned int size
,
138 union smc_host_cursor
*old
,
139 union smc_host_cursor
*new)
/* different wraps: remainder of old's pass plus new's count */
141 if (old
->wrap
!= new->wrap
)
143 ((size
- old
->count
) + new->count
));
/* same wrap: plain difference of the byte counts */
145 return max_t(int, 0, (new->count
- old
->count
));
148 static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor
*peer
,
149 union smc_host_cursor
*local
,
150 struct smc_connection
*conn
)
152 union smc_host_cursor temp
;
154 smc_curs_write(&temp
, smc_curs_read(local
, conn
), conn
);
155 peer
->count
= htonl(temp
.count
);
156 peer
->wrap
= htons(temp
.wrap
);
157 /* peer->reserved = htons(0); must be ensured by caller */
160 static inline void smc_host_msg_to_cdc(struct smc_cdc_msg
*peer
,
161 struct smc_host_cdc_msg
*local
,
162 struct smc_connection
*conn
)
164 peer
->common
.type
= local
->common
.type
;
165 peer
->len
= local
->len
;
166 peer
->seqno
= htons(local
->seqno
);
167 peer
->token
= htonl(local
->token
);
168 smc_host_cursor_to_cdc(&peer
->prod
, &local
->prod
, conn
);
169 smc_host_cursor_to_cdc(&peer
->cons
, &local
->cons
, conn
);
170 peer
->prod_flags
= local
->prod_flags
;
171 peer
->conn_state_flags
= local
->conn_state_flags
;
/*
 * smc_cdc_cursor_to_host() - fold a received network-order cursor
 * @peer into the host-order cursor @local, converting endianness and
 * filtering out-of-date values.
 * NOTE(review): garbled span -- the braces and the statements guarded
 * by the two staleness checks (presumably early returns) are missing
 * from this view; do not treat the fragment as complete.
 */
174 static inline void smc_cdc_cursor_to_host(union smc_host_cursor
*local
,
175 union smc_cdc_cursor
*peer
,
176 struct smc_connection
*conn
)
178 union smc_host_cursor temp
, old
;
179 union smc_cdc_cursor net
;
/* atomic snapshots of the current local cursor and the received one */
181 smc_curs_write(&old
, smc_curs_read(local
, conn
), conn
);
182 smc_curs_write_net(&net
, smc_curs_read_net(peer
, conn
), conn
);
/* convert the received cursor to host byte order */
183 temp
.count
= ntohl(net
.count
);
184 temp
.wrap
= ntohs(net
.wrap
);
/* staleness check: received wrap is behind the local one */
185 if ((old
.wrap
> temp
.wrap
) && temp
.wrap
)
/* staleness check: same wrap but received count is behind */
187 if ((old
.wrap
== temp
.wrap
) &&
188 (old
.count
> temp
.count
))
/* accept the newer value: store it atomically into @local */
190 smc_curs_write(local
, smc_curs_read(&temp
, conn
), conn
);
193 static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg
*local
,
194 struct smc_cdc_msg
*peer
,
195 struct smc_connection
*conn
)
197 local
->common
.type
= peer
->common
.type
;
198 local
->len
= peer
->len
;
199 local
->seqno
= ntohs(peer
->seqno
);
200 local
->token
= ntohl(peer
->token
);
201 smc_cdc_cursor_to_host(&local
->prod
, &peer
->prod
, conn
);
202 smc_cdc_cursor_to_host(&local
->cons
, &peer
->cons
, conn
);
203 local
->prod_flags
= peer
->prod_flags
;
204 local
->conn_state_flags
= peer
->conn_state_flags
;
/*
 * Public CDC entry points, implemented elsewhere.
 * NOTE(review): extraction garble -- original file line numbers are
 * fused into the text and declarations are split across lines.
 */
/* opaque forward declaration; layout is private to the implementation */
207 struct smc_cdc_tx_pend
;
/* presumably reserves a send buffer and pend slot on @link -- TODO
 * confirm against the implementation
 */
209 int smc_cdc_get_free_slot(struct smc_link
*link
, struct smc_wr_buf
**wr_buf
,
210 struct smc_cdc_tx_pend
**pend
);
211 void smc_cdc_tx_dismiss_slots(struct smc_connection
*conn
);
/* sends a CDC message using a previously reserved @wr_buf/@pend */
212 int smc_cdc_msg_send(struct smc_connection
*conn
, struct smc_wr_buf
*wr_buf
,
213 struct smc_cdc_tx_pend
*pend
);
214 int smc_cdc_get_slot_and_msg_send(struct smc_connection
*conn
);
215 bool smc_cdc_tx_has_pending(struct smc_connection
*conn
);
/* one-time subsystem initialization */
216 int smc_cdc_init(void) __init
;
218 #endif /* SMC_CDC_H */