net/cor/conn_src_sock.c

/**
 * Connection oriented routing
 * Copyright (C) 2007-2021 Michael Blizek
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include "cor.h"

static DEFINE_SPINLOCK(cor_flushtoconn_oom_lock);
static LIST_HEAD(cor_flushtoconn_oom_list);
static struct delayed_work cor_flushtoconn_oom_work;

static void cor_keepalive_req_workfunc(struct work_struct *work);

static DEFINE_SPINLOCK(cor_keepalive_req_lock);
DECLARE_WORK(cor_keepalive_req_work, cor_keepalive_req_workfunc);
static LIST_HEAD(cor_keepalive_req_list);

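/* Fold the bytes sent during the last measurement interval into the
 * exponentially weighted moving averages: new = old - old/8 + sample/8.
 * speed_limited uses the same sample, additionally capped at
 * SOCK_SNDSPEED_MAX.
 */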
static void _cor_update_src_sock_sndspeed(struct cor_snd_speed *snd_speed,
                __u32 timediff_jiffies)
{
        __u32 timediff_us = jiffies_to_usecs(timediff_jiffies);

        __u64 curr_sndspeed;

        if (timediff_us == 0)
                return;

        curr_sndspeed = div64_u64(snd_speed->bytes_sent * 1000000LL,
                        timediff_us);

        snd_speed->speed = snd_speed->speed - snd_speed->speed/8 +
                        ((__u32) min((__u64) U32_MAX, curr_sndspeed) / 8);

        BUG_ON(SOCK_SNDSPEED_MAX > U32_MAX);
        snd_speed->speed_limited = snd_speed->speed_limited -
                        snd_speed->speed_limited/8 + ((__u32)
                        min((__u64) SOCK_SNDSPEED_MAX, curr_sndspeed) / 8);
}

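/* Account bytes_sent for a socket-sourced conn and refresh its send speed
 * estimate. The measurement interval is at least HZ/10 and is stretched
 * when the outgoing neighbor shows high retransmit latency/stddev or a
 * large remote ack delay. If the buffer was flushed and more than two
 * intervals passed since the last refresh, measurement restarts from
 * SNDSPEED_INIT. The _l suffix presumably means the caller holds the
 * conn's rcv_lock, as elsewhere in this file.
 */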
void cor_update_src_sock_sndspeed(struct cor_conn *src_sock_l, __u32 bytes_sent)
{
        struct cor_snd_speed *snd_speed = &(src_sock_l->source.sock.snd_speed);
        __u32 update_interval_jiffies = HZ/10;
        unsigned long jiffies_tmp = jiffies;

        if (src_sock_l->targettype == TARGET_OUT &&
                        src_sock_l->target.out.nb != 0) {
                struct cor_neighbor *nb = src_sock_l->target.out.nb;

                __u32 lat = atomic_read(&(nb->latency_retrans_us));
                __u32 stddev = atomic_read(&(nb->latency_stddev_retrans_us));
                __u32 delay;

                if (src_sock_l->is_highlatency)
                        delay = atomic_read(&(nb->max_remote_ackconn_highlat_delay_us));
                else
                        delay = atomic_read(&(nb->max_remote_ackconn_lowlat_delay_us));

                update_interval_jiffies = max(update_interval_jiffies,
                                (__u32) msecs_to_jiffies(lat/1000 + stddev/500 +
                                delay/1000));
        }

        if (snd_speed->flushed != 0 && unlikely(time_after(jiffies_tmp,
                        snd_speed->jiffies_last_refresh +
                        update_interval_jiffies*2))) {
                snd_speed->state = SNDSPEED_INIT;
                snd_speed->jiffies_last_refresh = jiffies_tmp;
        }

        if (snd_speed->state == SNDSPEED_INIT) {
                if (time_after(jiffies_tmp, snd_speed->jiffies_last_refresh +
                                update_interval_jiffies)) {
                        snd_speed->state = SNDSPEED_ACTIVE;
                        snd_speed->jiffies_last_refresh = jiffies_tmp;
                        snd_speed->bytes_sent = 0;
                }
        } else if (likely(snd_speed->state == SNDSPEED_ACTIVE)) {
                if (unlikely(snd_speed->bytes_sent + bytes_sent <
                                snd_speed->bytes_sent))
                        snd_speed->bytes_sent = U32_MAX;
                else
                        snd_speed->bytes_sent += bytes_sent;

                if (time_after(jiffies_tmp, snd_speed->jiffies_last_refresh +
                                update_interval_jiffies)) {
                        _cor_update_src_sock_sndspeed(snd_speed, jiffies_tmp -
                                        snd_speed->jiffies_last_refresh);
                        snd_speed->jiffies_last_refresh = jiffies_tmp;
                        snd_speed->bytes_sent = 0;
                }
        } else {
                BUG();
        }

        snd_speed->flushed = src_sock_l->flush;
}

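/* Report whether the send buffer of a socket-sourced conn counts as
 * available. The limit starts at the configured bufsize, is raised while
 * the send window still has room, and is then clamped relative to the
 * measured send speed so that a slow conn does not queue excessive data.
 * With for_wakeup != 0 the limit shrinks by a quarter to batch wakeups.
 */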
int cor_sock_sndbufavailable(struct cor_conn *src_sock_lx, int for_wakeup)
{
        /* printk(KERN_ERR "sndbuf %p %u %u\n", src_sock_lx,
                        (src_sock_lx->bufsize.bufsize >> BUFSIZE_SHIFT),
                        src_sock_lx->data_buf.read_remaining); */
        __u32 limit = src_sock_lx->bufsize.bufsize >> BUFSIZE_SHIFT;

        if (src_sock_lx->targettype == TARGET_OUT &&
                        cor_seqno_after(
                        src_sock_lx->target.out.seqno_windowlimit,
                        src_sock_lx->target.out.seqno_nextsend +
                        src_sock_lx->data_buf.read_remaining)) {
                __u32 windowleft = (__u32) min((__u64) U32_MAX,
                                cor_seqno_clean(
                                src_sock_lx->target.out.seqno_windowlimit -
                                src_sock_lx->target.out.seqno_nextsend -
                                src_sock_lx->data_buf.read_remaining));

                limit = max(limit, min(windowleft,
                                (__u32) WINDOW_MAX_PER_CONN_MIN_OUT_WINOK));
        }

        /*
         * High cpu usage may cause high latency of the userspace sender.
         * Increasing bufferspace to compensate may increase latency further.
         */
        if (src_sock_lx->is_highlatency == 0) {
                limit = min(limit,
                                src_sock_lx->source.sock.snd_speed.speed / 20);
                limit = max(limit,
                                src_sock_lx->source.sock.snd_speed.speed / 512);
        } else {
                limit = max(limit,
                                src_sock_lx->source.sock.snd_speed.speed / 64);
        }

        /* reduce number of wakeups to reduce cpu usage */
        if (for_wakeup)
                limit -= limit/4;

        /* ">=" ... return 1 if read_remaining == 0 */
        return limit >= src_sock_lx->data_buf.read_remaining ? 1 : 0;
}

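/* Keep track of conns whose flush failed with RC_FTC_OOM on a global
 * list. Inserting the first entry schedules the retry worker, removing
 * the last entry cancels it. The list holds a conn reference; removal
 * drops it and wakes the writing socket.
 */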
static void cor_update_flushtoconn_oom_list(struct cor_conn *src_sock_l,
                int oom)
{
        BUG_ON(src_sock_l->sourcetype != SOURCE_SOCK);

        if (unlikely(oom != 0)) {
                if (src_sock_l->source.sock.in_flushtoconn_oom_list != 0)
                        return;

                spin_lock_bh(&cor_flushtoconn_oom_lock);

                if (list_empty(&cor_flushtoconn_oom_list)) {
                        schedule_delayed_work(&cor_flushtoconn_oom_work,
                                        msecs_to_jiffies(100));
                }

                list_add_tail(&(src_sock_l->source.sock.flushtoconn_oom_lh),
                                &cor_flushtoconn_oom_list);
                src_sock_l->source.sock.in_flushtoconn_oom_list = 1;

                spin_unlock_bh(&cor_flushtoconn_oom_lock);

                cor_conn_kref_get(src_sock_l, "flushtoconn_oom_list");
        } else {
                if (likely(src_sock_l->source.sock.in_flushtoconn_oom_list ==
                                0))
                        return;

                spin_lock_bh(&cor_flushtoconn_oom_lock);
                list_del(&(src_sock_l->source.sock.flushtoconn_oom_lh));
                src_sock_l->source.sock.in_flushtoconn_oom_list = 0;

                if (list_empty(&cor_flushtoconn_oom_list))
                        cancel_delayed_work(&cor_flushtoconn_oom_work);

                spin_unlock_bh(&cor_flushtoconn_oom_lock);

                cor_conn_kref_put_bug(src_sock_l, "flushtoconn_oom_list");

                if (likely(src_sock_l->source.sock.cs != 0)) {
                        cor_sk_write_space(src_sock_l->source.sock.cs);
                }
        }
}

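/* Push one managed-socket segment (header, payload, checksum) into the
 * conn. source.sock.sent records how far the segment got, so a retry
 * after RC_FTC_OOM resumes in the middle. The flush flag is only passed
 * along with the final (checksum) part.
 */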
static int __cor_mngdsocket_flushtoconn(struct cor_conn *src_sock_l,
                char *snd_hdr, __u16 snd_hdr_len,
                char *snd_data, __u16 snd_data_len,
                char *snd_chksum, __u16 snd_chksum_len,
                __u8 flush)
{
        if (likely(src_sock_l->source.sock.sent < snd_hdr_len)) {
                __u32 off = src_sock_l->source.sock.sent;
                __u32 len = snd_hdr_len - off;

                __u32 rc = cor_receive_sock(src_sock_l, snd_hdr + off, len,
                                0);

                src_sock_l->source.sock.sent += rc;

                if (unlikely(rc < len))
                        return RC_FTC_OOM;
        }

        if (likely(src_sock_l->source.sock.sent - snd_hdr_len <
                        snd_data_len)) {
                __u32 off = src_sock_l->source.sock.sent -
                                snd_hdr_len;
                __u32 len = snd_data_len - off;

                __u32 rc = cor_receive_sock(src_sock_l, snd_data + off, len,
                                0);

                src_sock_l->source.sock.sent += rc;

                if (unlikely(rc < len))
                        return RC_FTC_OOM;
        }

        if (likely(src_sock_l->source.sock.sent - snd_hdr_len -
                        snd_data_len < snd_chksum_len)) {
                __u32 off = src_sock_l->source.sock.sent -
                                snd_hdr_len - snd_data_len;
                __u32 len = CONN_MNGD_CHECKSUMLEN - off;

                __u32 rc = cor_receive_sock(src_sock_l, snd_chksum + off, len,
                                flush);

                src_sock_l->source.sock.sent += rc;

                if (unlikely(rc < len))
                        return RC_FTC_OOM;
        }

        BUG_ON(src_sock_l->source.sock.sent > snd_hdr_len +
                        ((__u32) snd_data_len) + snd_chksum_len);

#ifdef COR_DEBUG
        if (1) {
                char checksum_calc[CONN_MNGD_CHECKSUMLEN];
                BUG_ON(CONN_MNGD_CHECKSUMLEN != snd_chksum_len);

                cor_mngdsocket_chksum(snd_hdr, snd_hdr_len,
                                snd_data, snd_data_len,
                                checksum_calc, snd_chksum_len);

                WARN_ON(memcmp(snd_chksum, checksum_calc,
                                snd_chksum_len) != 0);
        }
#endif

        return RC_FTC_OK;
}

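/* Collect the pending ctrl events (EOF, RCVEND, keepalive request and
 * response) into a ctrl segment: 2 byte header, up to 8 bytes of cookie
 * data, plus checksum. EOF is held back while payload data is still
 * buffered.
 */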
static void _cor_mngdsocket_flushtoconn_fill_ctrl(struct cor_conn *src_sock_l)
{
        __u16 hdr = 0;

        if (unlikely(src_sock_l->source.sock.send_eof_needed != 0) &&
                        src_sock_l->source.sock.buf_data_filled == 0) {
                hdr |= CONN_MNGD_EOF;
                src_sock_l->source.sock.send_eof_needed = 0;
        }

        if (unlikely(src_sock_l->source.sock.send_rcvend_needed != 0)) {
                hdr |= CONN_MNGD_RCVEND;
                src_sock_l->source.sock.send_rcvend_needed = 0;
        }

        if (unlikely(src_sock_l->source.sock.send_keepalive_req_needed != 0)) {
                hdr |= CONN_MNGD_KEEPALIVE_REQ;
                src_sock_l->source.sock.send_keepalive_req_needed = 0;
        }

        if (unlikely(src_sock_l->source.sock.send_keepalive_resp_needed != 0)) {
                hdr |= CONN_MNGD_KEEPALIVE_RESP;
                src_sock_l->source.sock.send_keepalive_resp_needed = 0;
        }

        if (likely(hdr == 0))
                return;

        BUILD_BUG_ON(CONN_MNGD_HEADERLEN != 2);
        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_ctrl.snd_hdr) !=
                        CONN_MNGD_HEADERLEN);
        BUILD_BUG_ON(CONN_MNGD_MAX_CTRL_DATALEN != 8);
        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_ctrl.snd_data) !=
                        CONN_MNGD_MAX_CTRL_DATALEN);
        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_ctrl.snd_chksum) !=
                        CONN_MNGD_CHECKSUMLEN);

        cor_put_u16(&(src_sock_l->source.sock.buf_ctrl.snd_hdr[0]), hdr);

        src_sock_l->source.sock.buf_ctrl.snd_data_len = 0;

        if ((hdr & CONN_MNGD_KEEPALIVE_REQ) != 0) {
                BUG_ON(src_sock_l->source.sock.buf_ctrl.snd_data_len + 4 >
                                CONN_MNGD_MAX_CTRL_DATALEN);
                cor_put_be32(&(src_sock_l->source.sock.buf_ctrl.snd_data[0]) +
                                src_sock_l->source.sock.buf_ctrl.snd_data_len,
                                src_sock_l->source.sock.keepalive_req_cookie);
                src_sock_l->source.sock.buf_ctrl.snd_data_len += 4;
        }

        if ((hdr & CONN_MNGD_KEEPALIVE_RESP) != 0) {
                BUG_ON(src_sock_l->source.sock.buf_ctrl.snd_data_len + 4 >
                                CONN_MNGD_MAX_CTRL_DATALEN);
                cor_put_be32(&(src_sock_l->source.sock.buf_ctrl.snd_data[0]) +
                                src_sock_l->source.sock.buf_ctrl.snd_data_len,
                                src_sock_l->source.sock.keepalive_resp_cookie);
                src_sock_l->source.sock.buf_ctrl.snd_data_len += 4;
        }

        cor_mngdsocket_chksum(
                        &(src_sock_l->source.sock.buf_ctrl.snd_hdr[0]),
                        CONN_MNGD_HEADERLEN,
                        &(src_sock_l->source.sock.buf_ctrl.snd_data[0]),
                        src_sock_l->source.sock.buf_ctrl.snd_data_len,
                        &(src_sock_l->source.sock.buf_ctrl.snd_chksum[0]),
                        CONN_MNGD_CHECKSUMLEN);

        src_sock_l->source.sock.buf_ctrl_filled = 1;
}

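/* Send the prepared ctrl segment. flush is requested only if nothing
 * else (payload data, EOF, RCVEND) is still pending. On success the ctrl
 * buffer and the send offset are reset.
 */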
static int _cor_mngdsocket_flushtoconn_ctrl(struct cor_conn *src_sock_l)
{
        int rc;

        /* snapshot the buffered length; buf_ctrl is zeroed on success,
         * but the BUG_ON below still needs the old value
         */
        __u16 snd_data_len = src_sock_l->source.sock.buf_ctrl.snd_data_len;
        __u8 flush = 0;

        BUG_ON(snd_data_len >
                        65535 - CONN_MNGD_HEADERLEN - CONN_MNGD_CHECKSUMLEN);
        BUG_ON(snd_data_len >
                        sizeof(src_sock_l->source.sock.buf_ctrl.snd_data));

        if (src_sock_l->source.sock.flush != 0 &&
                        src_sock_l->source.sock.buf_data_filled == 0 &&
                        src_sock_l->source.sock.send_eof_needed == 0 &&
                        src_sock_l->source.sock.send_rcvend_needed == 0) {
                flush = 1;
        }

        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_ctrl.snd_hdr) !=
                        CONN_MNGD_HEADERLEN);
        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_ctrl.snd_chksum) !=
                        CONN_MNGD_CHECKSUMLEN);

        rc = __cor_mngdsocket_flushtoconn(src_sock_l,
                        &(src_sock_l->source.sock.buf_ctrl.snd_hdr[0]),
                        CONN_MNGD_HEADERLEN,
                        &(src_sock_l->source.sock.buf_ctrl.snd_data[0]),
                        src_sock_l->source.sock.buf_ctrl.snd_data_len,
                        &(src_sock_l->source.sock.buf_ctrl.snd_chksum[0]),
                        CONN_MNGD_CHECKSUMLEN,
                        flush);

        if (likely(rc == RC_FTC_OK)) {
                memset(&(src_sock_l->source.sock.buf_ctrl), 0,
                                sizeof(src_sock_l->source.sock.buf_ctrl));
                src_sock_l->source.sock.buf_ctrl_filled = 0;
                src_sock_l->source.sock.sent = 0;
        }

        BUG_ON(src_sock_l->source.sock.sent > CONN_MNGD_HEADERLEN +
                        ((__u32) snd_data_len) + CONN_MNGD_CHECKSUMLEN);

        cor_flush_buf(src_sock_l);

        return rc;
}

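/* Send the prepared payload segment, the data counterpart of
 * _cor_mngdsocket_flushtoconn_ctrl() above.
 */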
static int _cor_mngdsocket_flushtoconn_data(struct cor_conn *src_sock_l)
{
        int rc;

        __u8 flush = 0;

        BUG_ON(src_sock_l->source.sock.buf_data.snd_data_len >
                        65535 - CONN_MNGD_HEADERLEN - CONN_MNGD_CHECKSUMLEN);
        BUG_ON(src_sock_l->source.sock.buf_data.snd_data_len >
                        CONN_MNGD_MAX_SEGMENT_SIZE);

        if (src_sock_l->source.sock.flush != 0 &&
                        likely(src_sock_l->source.sock.send_eof_needed == 0) &&
                        likely(src_sock_l->source.sock.send_rcvend_needed ==
                        0)) {
                flush = 1;
        }

        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_data.snd_hdr) !=
                        CONN_MNGD_HEADERLEN);
        BUILD_BUG_ON(sizeof(src_sock_l->source.sock.buf_data.snd_chksum) !=
                        CONN_MNGD_CHECKSUMLEN);

        rc = __cor_mngdsocket_flushtoconn(src_sock_l,
                        &(src_sock_l->source.sock.buf_data.snd_hdr[0]),
                        CONN_MNGD_HEADERLEN,
                        src_sock_l->source.sock.buf_data.snd_data,
                        src_sock_l->source.sock.buf_data.snd_data_len,
                        &(src_sock_l->source.sock.buf_data.snd_chksum[0]),
                        CONN_MNGD_CHECKSUMLEN,
                        flush);

        if (likely(rc == RC_FTC_OK)) {
                memset(&(src_sock_l->source.sock.buf_data), 0,
                                sizeof(src_sock_l->source.sock.buf_data));
                src_sock_l->source.sock.buf_data_filled = 0;
                src_sock_l->source.sock.sent = 0;
        }

        BUG_ON(src_sock_l->source.sock.sent > CONN_MNGD_HEADERLEN +
                        ((__u32) src_sock_l->source.sock.buf_data.snd_data_len)
                        + CONN_MNGD_CHECKSUMLEN);

        cor_flush_buf(src_sock_l);

        return rc;
}

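/* Flush pending ctrl and data segments in order, then build and send a
 * new ctrl segment if any ctrl events became pending. Bails out with the
 * error code of the first stage that cannot complete.
 */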
int _cor_mngdsocket_flushtoconn(struct cor_conn *src_sock_l)
{
        BUG_ON(src_sock_l->sourcetype != SOURCE_SOCK);

        if (unlikely(src_sock_l->isreset != 0))
                return RC_FTC_OK;

        if (unlikely(src_sock_l->source.sock.buf_ctrl_filled != 0)) {
                int rc = _cor_mngdsocket_flushtoconn_ctrl(src_sock_l);
                if (unlikely(rc != RC_FTC_OK))
                        return rc;
        }

        if (src_sock_l->source.sock.buf_data_filled != 0) {
                int rc = _cor_mngdsocket_flushtoconn_data(src_sock_l);
                if (unlikely(rc != RC_FTC_OK))
                        return rc;
        }

        BUG_ON(src_sock_l->source.sock.buf_ctrl_filled != 0);
        BUG_ON(src_sock_l->source.sock.buf_data_filled != 0);
        _cor_mngdsocket_flushtoconn_fill_ctrl(src_sock_l);
        if (unlikely(src_sock_l->source.sock.buf_ctrl_filled != 0)) {
                int rc = _cor_mngdsocket_flushtoconn_ctrl(src_sock_l);
                if (unlikely(rc != RC_FTC_OK))
                        return rc;
        }

        return RC_FTC_OK;
}

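/* Delayed-work handler: retry the flush for each conn queued on the oom
 * list. Processing stops at the first conn that still returns
 * RC_FTC_OOM; the work is then rescheduled.
 */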
static void cor_mngdsocket_flushtoconn_oomresume(struct work_struct *work)
{
        int rc = RC_FTC_OK;

        while (rc != RC_FTC_OOM) {
                struct cor_conn *src_sock_o;

                spin_lock_bh(&cor_flushtoconn_oom_lock);

                if (list_empty(&(cor_flushtoconn_oom_list))) {
                        spin_unlock_bh(&cor_flushtoconn_oom_lock);
                        break;
                }

                src_sock_o = container_of(cor_flushtoconn_oom_list.next,
                                struct cor_conn,
                                source.sock.flushtoconn_oom_lh);

                BUG_ON(src_sock_o == 0);

                cor_conn_kref_get(src_sock_o, "stack");

                spin_unlock_bh(&cor_flushtoconn_oom_lock);

                spin_lock_bh(&(src_sock_o->rcv_lock));
                BUG_ON(src_sock_o->sourcetype != SOURCE_SOCK);
                BUG_ON(src_sock_o->source.sock.in_flushtoconn_oom_list == 0);
                rc = _cor_mngdsocket_flushtoconn(src_sock_o);
                if (likely(rc != RC_FTC_OOM &&
                                src_sock_o->source.sock.cs != 0)) {
                        cor_sk_write_space(src_sock_o->source.sock.cs);
                }
                /* on success the conn must leave the oom list, or the
                 * loop would pick the same conn again right away
                 */
                cor_update_flushtoconn_oom_list(src_sock_o,
                                rc == RC_FTC_OOM);
                spin_unlock_bh(&(src_sock_o->rcv_lock));

                cor_conn_kref_put(src_sock_o, "stack");
        }

        if (rc == RC_FTC_OOM) {
                schedule_delayed_work(&cor_flushtoconn_oom_work,
                                msecs_to_jiffies(100));
        }
}

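/* Queue a keepalive request with a fresh random cookie and try to flush
 * it. The peer has to echo the cookie; the echo is checked in
 * cor_keepalive_resp_rcvd().
 */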
void cor_mngdsocket_flushtoconn_ctrl_send_keepalive_req(
                struct cor_conn *src_sock_l)
{
        int rc;

        BUG_ON(src_sock_l->source.sock.keepalive_intransit != 0);
        BUG_ON(src_sock_l->source.sock.send_keepalive_req_needed != 0);

        get_random_bytes((char *)
                        &(src_sock_l->source.sock.keepalive_req_cookie),
                        sizeof(src_sock_l->source.sock.keepalive_req_cookie));
        src_sock_l->source.sock.send_keepalive_req_needed = 1;

        src_sock_l->source.sock.keepalive_intransit = 1;
        src_sock_l->source.sock.jiffies_keepalive_lastact = jiffies;

        rc = _cor_mngdsocket_flushtoconn(src_sock_l);
        cor_update_flushtoconn_oom_list(src_sock_l, rc == RC_FTC_OOM);
}

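/* Entry point from the socket layer for ctrl events: set the requested
 * flags (EOF also requests a flush, a keepalive response carries the
 * cookie received from the peer) and flush. If the conn was reset
 * underneath the socket, the socket is marked reset and RC_FTC_ERR is
 * returned.
 */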
int cor_mngdsocket_flushtoconn_ctrl(struct cor_sock *cs_m_l, __u8 send_eof,
                __u8 send_rcvend, __u8 send_keepalive_resp,
                __be32 keepalive_resp_cookie)
{
        int rc = RC_FTC_OK;

        struct cor_conn *src_sock;

        BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);
        src_sock = cs_m_l->data.conn_managed.src_sock;

        if (unlikely(src_sock == 0))
                return RC_FTC_ERR;

        spin_lock_bh(&(src_sock->rcv_lock));

        if (unlikely(cor_is_src_sock(src_sock, cs_m_l) == 0 ||
                        src_sock->isreset != 0)) {
                cs_m_l->data.conn_managed.is_reset = 1;
                cor_sk_data_ready(cs_m_l);
                rc = RC_FTC_ERR;
                cor_flush_buf(src_sock);
                goto out_err;
        }

        if (send_eof != 0) {
                src_sock->source.sock.send_eof_needed = 1;
                src_sock->source.sock.flush = 1;
        }
        if (send_rcvend != 0)
                src_sock->source.sock.send_rcvend_needed = 1;

        if (send_keepalive_resp != 0 &&
                        src_sock->source.sock.send_keepalive_resp_needed == 0) {
                src_sock->source.sock.keepalive_resp_cookie =
                                keepalive_resp_cookie;
                src_sock->source.sock.send_keepalive_resp_needed = 1;
        }

        rc = _cor_mngdsocket_flushtoconn(src_sock);

out_err:
        cor_update_flushtoconn_oom_list(src_sock, rc == RC_FTC_OOM);

        spin_unlock_bh(&(src_sock->rcv_lock));

        return rc;
}

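/* Entry point from the socket layer for payload data: wrap
 * cs_m_l->data.conn_managed.snd_buf into a data segment and flush it.
 * While a previously queued segment is still draining, the call reports
 * RC_FTC_OOM instead of accepting new data.
 */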
int cor_mngdsocket_flushtoconn_data(struct cor_sock *cs_m_l)
{
        int rc = RC_FTC_OK;

        struct cor_conn *src_sock = cs_m_l->data.conn_managed.src_sock;

        if (unlikely(src_sock == 0))
                return RC_FTC_ERR;

        spin_lock_bh(&(src_sock->rcv_lock));

        if (unlikely(cor_is_src_sock(src_sock, cs_m_l) == 0 ||
                        src_sock->isreset != 0)) {
                cs_m_l->data.conn_managed.is_reset = 1;
                cor_sk_data_ready(cs_m_l);
                rc = RC_FTC_ERR;
                cor_flush_buf(src_sock);
                goto out;
        }

        src_sock->source.sock.flush = cs_m_l->data.conn_managed.flush;

        if (unlikely(cs_m_l->data.conn_managed.send_in_progress != 0)) {
                if (src_sock->source.sock.buf_data_filled == 0) {
                        cs_m_l->data.conn_managed.send_in_progress = 0;
                } else {
                        rc = RC_FTC_OOM;
                }
                goto out;
        }

        BUG_ON(src_sock->source.sock.buf_data_filled != 0);
        BUG_ON(cs_m_l->data.conn_managed.snd_data_len == 0);
        BUG_ON(cs_m_l->data.conn_managed.snd_data_len >
                        CONN_MNGD_MAX_SEGMENT_SIZE);

        BUILD_BUG_ON(CONN_MNGD_HEADERLEN != 2);
        BUILD_BUG_ON(sizeof(src_sock->source.sock.buf_data.snd_hdr) !=
                        CONN_MNGD_HEADERLEN);
        BUILD_BUG_ON(sizeof(src_sock->source.sock.buf_data.snd_chksum) !=
                        CONN_MNGD_CHECKSUMLEN);

        cor_put_u16(&(src_sock->source.sock.buf_data.snd_hdr[0]),
                        CONN_MNGD_HASDATA |
                        (cs_m_l->data.conn_managed.snd_data_len - 1));

        src_sock->source.sock.buf_data.snd_data =
                        cs_m_l->data.conn_managed.snd_buf;
        src_sock->source.sock.buf_data.snd_data_len =
                        cs_m_l->data.conn_managed.snd_data_len;

        cor_mngdsocket_chksum(
                        &(src_sock->source.sock.buf_data.snd_hdr[0]),
                        CONN_MNGD_HEADERLEN,
                        &(src_sock->source.sock.buf_data.snd_data[0]),
                        src_sock->source.sock.buf_data.snd_data_len,
                        &(src_sock->source.sock.buf_data.snd_chksum[0]),
                        CONN_MNGD_CHECKSUMLEN);
        src_sock->source.sock.buf_data_filled = 1;

        rc = _cor_mngdsocket_flushtoconn(src_sock);

        if (unlikely(rc != RC_FTC_OK)) {
                cs_m_l->data.conn_managed.send_in_progress = 1;
        } else {
                BUG_ON(src_sock->source.sock.buf_data_filled != 0);
                cs_m_l->data.conn_managed.snd_data_len = 0;
        }

out:
        cor_update_flushtoconn_oom_list(src_sock, rc == RC_FTC_OOM);

        spin_unlock_bh(&(src_sock->rcv_lock));

        return rc;
}

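/* Work item: send keepalive requests for the conns queued by the timer.
 * A conn that still has a request in transit when its timer fires again
 * has missed the response deadline and is reset.
 */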
static void cor_keepalive_req_workfunc(struct work_struct *work)
{
        while (1) {
                unsigned long iflags;
                struct cor_conn *src_sock_o;

                spin_lock_irqsave(&cor_keepalive_req_lock, iflags);

                if (list_empty(&cor_keepalive_req_list)) {
                        spin_unlock_irqrestore(&cor_keepalive_req_lock,
                                        iflags);
                        break;
                }

                src_sock_o = container_of(cor_keepalive_req_list.next,
                                struct cor_conn, source.sock.keepalive_lh);

                list_del(&(src_sock_o->source.sock.keepalive_lh));
                src_sock_o->source.sock.in_keepalive_list = 0;

                spin_unlock_irqrestore(&cor_keepalive_req_lock, iflags);

                spin_lock_bh(&(src_sock_o->rcv_lock));

                BUG_ON(src_sock_o->sourcetype != SOURCE_SOCK);

                if (unlikely(src_sock_o->isreset != 0)) {
                        spin_unlock_bh(&(src_sock_o->rcv_lock));
                } else if (src_sock_o->source.sock.keepalive_intransit == 0) {
                        cor_mngdsocket_flushtoconn_ctrl_send_keepalive_req(
                                        src_sock_o);
                        cor_keepalive_req_sched_timer(src_sock_o);
                        spin_unlock_bh(&(src_sock_o->rcv_lock));
                } else {
                        spin_unlock_bh(&(src_sock_o->rcv_lock));
                        cor_reset_conn(src_sock_o);
                }

                cor_conn_kref_put(src_sock_o, "keepalive_snd_list");
        }
}

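/* Keepalive timer callback: queue the conn for the worker instead of
 * sending directly, presumably to keep the flush path out of timer
 * context, and kick the worker if the queue was empty. The reference
 * taken when the timer was armed is dropped here.
 */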
void cor_keepalive_req_timerfunc(struct timer_list *retrans_conn_timer)
{
        struct cor_conn *src_sock_o = container_of(retrans_conn_timer,
                        struct cor_conn, source.sock.keepalive_timer);

        spin_lock_bh(&(src_sock_o->rcv_lock));

        BUG_ON(src_sock_o->sourcetype != SOURCE_SOCK);

        if (likely(src_sock_o->isreset == 0) &&
                        src_sock_o->source.sock.in_keepalive_list == 0) {
                unsigned long iflags;
                int schedule_work_needed;

                spin_lock_irqsave(&cor_keepalive_req_lock, iflags);

                schedule_work_needed = list_empty(&cor_keepalive_req_list);

                list_add_tail(&(src_sock_o->source.sock.keepalive_lh),
                                &cor_keepalive_req_list);
                src_sock_o->source.sock.in_keepalive_list = 1;
                cor_conn_kref_get(src_sock_o, "keepalive_snd_list");

                if (schedule_work_needed)
                        schedule_work(&cor_keepalive_req_work);

                spin_unlock_irqrestore(&cor_keepalive_req_lock, iflags);
        }

        spin_unlock_bh(&(src_sock_o->rcv_lock));

        cor_conn_kref_put(src_sock_o, "keepalive_snd_timer");
}

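/* (Re)arm the keepalive timer: KEEPALIVE_INTERVAL_SECS of idleness when
 * no request is in transit, KEEPALIVE_TIMEOUT_SECS as the response
 * deadline otherwise. A conn reference is taken when the timer was not
 * already pending (mod_timer() returns 0 in that case).
 */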
void cor_keepalive_req_sched_timer(struct cor_conn *src_sock_lx)
{
        unsigned long timeout;

        BUG_ON(src_sock_lx->sourcetype != SOURCE_SOCK);

        BUG_ON(src_sock_lx->source.sock.socktype != SOCKTYPE_MANAGED);

        if (src_sock_lx->source.sock.keepalive_intransit == 0) {
                timeout = src_sock_lx->source.sock.jiffies_keepalive_lastact +
                                KEEPALIVE_INTERVAL_SECS * HZ;
        } else {
                timeout = src_sock_lx->source.sock.jiffies_keepalive_lastact +
                                KEEPALIVE_TIMEOUT_SECS * HZ;
        }

        if (mod_timer(&(src_sock_lx->source.sock.keepalive_timer), timeout) ==
                        0)
                cor_conn_kref_get(src_sock_lx, "keepalive_snd_timer");
}

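/* A keepalive response arrived on the socket: a cookie mismatch resets
 * the conn, a match clears the in-transit state and re-arms the interval
 * timer.
 */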
void cor_keepalive_resp_rcvd(struct cor_sock *cs_m_l, __be32 cookie)
{
        int reset_needed = 0;
        struct cor_conn *src_sock;

        BUG_ON(cs_m_l->type != CS_TYPE_CONN_MANAGED);
        src_sock = cs_m_l->data.conn_managed.src_sock;

        if (unlikely(src_sock == 0))
                return;

        spin_lock_bh(&(src_sock->rcv_lock));

        if (unlikely(cor_is_src_sock(src_sock, cs_m_l) == 0 ||
                        src_sock->isreset != 0)) {
                cs_m_l->data.conn_managed.is_reset = 1;
                cor_sk_data_ready(cs_m_l);
                cor_flush_buf(src_sock);
                goto out_err;
        }

        if (cookie != src_sock->source.sock.keepalive_req_cookie) {
                reset_needed = 1;
        } else {
                src_sock->source.sock.keepalive_intransit = 0;
                src_sock->source.sock.jiffies_keepalive_lastact = jiffies;
                cor_keepalive_req_sched_timer(src_sock);
        }

out_err:
        /* the goto above arrives here with rcv_lock still held, so the
         * label has to sit before the unlock
         */
        spin_unlock_bh(&(src_sock->rcv_lock));

        if (reset_needed)
                cor_reset_conn(src_sock);
}

int __init cor_conn_src_sock_init1(void)
{
        INIT_DELAYED_WORK(&cor_flushtoconn_oom_work,
                        cor_mngdsocket_flushtoconn_oomresume);

        return 0;
}

void __exit cor_conn_src_sock_exit1(void)
{
        flush_delayed_work(&cor_flushtoconn_oom_work);
}

MODULE_LICENSE("GPL");