/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
/*
 * This struct holds the first and last local port number.
 */
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = SEQLOCK_UNLOCKED,
	.range = { 32768, 61000 },
};

unsigned long *sysctl_local_reserved_ports;
EXPORT_SYMBOL(sysctl_local_reserved_ports);
void inet_get_local_port_range(int *low, int *high)
{
	unsigned seq;

	do {
		seq = read_seqbegin(&sysctl_local_ports.lock);

		*low = sysctl_local_ports.range[0];
		*high = sysctl_local_ports.range[1];
	} while (read_seqretry(&sysctl_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);
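
/*
 * Example (a minimal sketch added for illustration, not part of the
 * original file; the helper name is hypothetical): callers snapshot the
 * range and derive a starting port from it, exactly as
 * inet_csk_get_port() does below.
 */
static inline int example_pick_port_in_range(void)
{
	int low, high;

	/* Seqlock-protected snapshot of the sysctl'ed range. */
	inet_get_local_port_range(&low, &high);

	/* Uniformly random port in [low, high]. */
	return net_random() % (high - low + 1) + low;
}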
int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	/*
	 * Unlike other sk lookup places we do not check
	 * for sk_net here, since _all_ the socks listed
	 * in tb->owners list belong to the same net - the
	 * one this bucket belongs to.
	 */

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
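
/*
 * Worked example (illustrative, not from the original source): a socket
 * bound to 0.0.0.0:8080 conflicts with a second bind to 127.0.0.1:8080,
 * because a wildcard rcv_saddr overlaps every address. With SO_REUSEADDR
 * set on both sockets the port may be shared, unless the already-bound
 * socket is listening: sk2->sk_state == TCP_LISTEN re-enables the
 * conflict check even for reuse-friendly sockets.
 */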
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
							spin_unlock(&head->lock);
							snum = smallest_rover;
							goto have_snum;
						}
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search?  It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use.  HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}
				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);
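
/*
 * Worked example (illustrative): with the default range defined above,
 * remaining = (61000 - 32768) + 1 = 28233, so the search probes at most
 * 28233 candidate ports before declaring the range exhausted and falling
 * back to the reuse candidate tracked in smallest_rover.
 */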
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk_sleep(sk), &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk_sleep(sk), &wait);
	return err;
}
/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	WARN_ON(newsk->sk_state == TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}
EXPORT_SYMBOL(inet_csk_accept);
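
/*
 * Usage note (illustrative): connection-oriented protocols export this
 * helper through their struct proto, e.g. tcp_prot sets .accept to
 * inet_csk_accept, so a userspace accept(2) on a TCP socket reaches this
 * function via inet_accept().
 */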
/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to use.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);
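
/*
 * Usage sketch (illustrative; mirrors what TCP does from
 * tcp_init_xmit_timers() in net/ipv4/tcp_timer.c):
 *
 *	inet_csk_init_xmit_timers(sk, &tcp_write_timer,
 *				  &tcp_delack_timer, &tcp_keepalive_timer);
 */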
void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);
void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);
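
/*
 * Usage note (illustrative): TCP arms this timer when SO_KEEPALIVE is
 * switched on, roughly as in tcp_set_keepalive():
 *
 *	inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
 */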
struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .mark = sk->sk_mark,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .flags = inet_sk_flowi_flags(sk),
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->inet_sport,
					 .dport = ireq->rmt_port } } };
	struct net *net = sock_net(sk);

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(net, &rt, &fl, sk, 0))
		goto no_route;
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto route_err;
	return &rt->dst;

route_err:
	ip_rt_put(rt);
no_route:
	IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
	return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);
static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
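
/*
 * Note (illustrative): the "& (synq_hsize - 1)" reduction assumes the
 * SYN queue hash size is a power of two; e.g. with synq_hsize == 512 the
 * jhash_2words() result is masked with 0x1ff, yielding a slot in [0, 511].
 */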
struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			WARN_ON(req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}
EXPORT_SYMBOL_GPL(inet_csk_search_req);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
	/*
	 * Do not resend while waiting for data after ACK,
	 * start to resend at the end of the deferring period to give
	 * a last chance for data or ACK to create an established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->retrans >= rskq_defer_accept - 1;
}
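
/*
 * Worked example (illustrative): with rskq_defer_accept == 3 and
 * thresh == 5, a request whose SYN-ACK has been ACKed (but which still
 * carries no data) is not resent while req->retrans < 2, is resent again
 * from retrans == 2 onward as a last chance, and expires only once
 * retrans reaches both thresh and max_retries; an un-ACKed request is
 * always resent and expires at thresh, as without defer-accept.
 */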
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				if (req->rsk_ops->syn_ack_timeout)
					req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
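
/*
 * Worked example (illustrative): with nr_table_entries == 512,
 * timeout == 3 * HZ and interval == HZ, each run scans
 * budget = 2 * (512 / 3) = 340 slots, so the rotating clock_hand covers
 * the whole table roughly twice per timeout period.
 */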
struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
		inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
		inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff	  = 0;
		newicsk->icsk_probes_out  = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone);
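
/*
 * Usage note (illustrative): protocol code builds on this helper when
 * minting a child socket from a completed handshake, e.g. TCP's
 * tcp_create_openreq_child() starts from inet_csk_clone() and then fills
 * in the TCP-specific state.
 */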
/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	WARN_ON(sk->sk_state != TCP_CLOSE);
	WARN_ON(!sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	WARN_ON(!sk_unhashed(sk));

	/* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
	WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);
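
/*
 * Usage sketch (illustrative; condensed from the af_inet listen(2) path,
 * where inet_listen() does roughly):
 *
 *	err = inet_csk_listen_start(sk, backlog);
 *	if (err == 0)
 *		sk->sk_max_ack_backlog = backlog;
 */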
/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous while synflood, but it is
	 * bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		WARN_ON(sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		percpu_counter_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	WARN_ON(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);
void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family		= AF_INET;
	sin->sin_addr.s_addr	= inet->inet_daddr;
	sin->sin_port		= inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);
#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif