net/ipv6/inet6_connection_sock.c

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET6 connection oriented protocols.
 *
 * Authors:	See the TCPv6 sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/in6.h>
#include <linux/ipv6.h>
#include <linux/jhash.h>
#include <linux/slab.h>

#include <net/addrconf.h>
#include <net/inet_connection_sock.h>
#include <net/inet_ecn.h>
#include <net/inet_hashtables.h>
#include <net/ip6_route.h>
#include <net/sock.h>
#include <net/inet6_connection_sock.h>

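/*
 * Walk the owners of an inet_bind_bucket and report whether binding this
 * IPv6 socket to the same port would conflict, taking SO_REUSEADDR,
 * SO_REUSEPORT and any bound device into account.
 */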
int inet6_csk_bind_conflict(const struct sock *sk,
			    const struct inet_bind_bucket *tb, bool relax)
{
	const struct sock *sk2;
	int reuse = sk->sk_reuse;
	int reuseport = sk->sk_reuseport;
	kuid_t uid = sock_i_uid((struct sock *)sk);

	/* We must walk the whole port owner list in this case. -DaveM */
	/*
	 * See comment in inet_csk_bind_conflict about sock lookup
	 * vs net namespaces issues.
	 */
	sk_for_each_bound(sk2, &tb->owners) {
		if (sk != sk2 &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if ((!reuse || !sk2->sk_reuse ||
			     sk2->sk_state == TCP_LISTEN) &&
			    (!reuseport || !sk2->sk_reuseport ||
			     (sk2->sk_state != TCP_TIME_WAIT &&
			      !uid_eq(uid,
				      sock_i_uid((struct sock *)sk2))))) {
				if (ipv6_rcv_saddr_equal(sk, sk2))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN &&
			    ipv6_rcv_saddr_equal(sk, sk2))
				break;
		}
	}

	return sk2 != NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);

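/*
 * Build the flow description for a request sock and look up the IPv6
 * route (dst_entry) towards the peer. Returns NULL if no route is found.
 */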
struct dst_entry *inet6_csk_route_req(struct sock *sk,
				      struct flowi6 *fl6,
				      const struct request_sock *req)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = IPPROTO_TCP;
	fl6->daddr = ireq->ir_v6_rmt_addr;
	final_p = fl6_update_dst(fl6, np->opt, &final);
	fl6->saddr = ireq->ir_v6_loc_addr;
	fl6->flowi6_oif = ireq->ir_iif;
	fl6->flowi6_mark = ireq->ir_mark;
	fl6->fl6_dport = ireq->ir_rmt_port;
	fl6->fl6_sport = htons(ireq->ir_num);
	security_req_classify_flow(req, flowi6_to_flowi(fl6));

	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
	if (IS_ERR(dst))
		return NULL;

	return dst;
}

/*
 * request_sock (formerly open request) hash tables.
 */
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
			   const u32 rnd, const u32 synq_hsize)
{
	u32 c;

	c = jhash_3words((__force u32)raddr->s6_addr32[0],
			 (__force u32)raddr->s6_addr32[1],
			 (__force u32)raddr->s6_addr32[2],
			 rnd);

	c = jhash_2words((__force u32)raddr->s6_addr32[3],
			 (__force u32)rport,
			 c);

	return c & (synq_hsize - 1);
}

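/*
 * Look up a pending request_sock in the listener's SYN table by remote
 * port, remote/local address and (optionally) incoming interface.
 */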
struct request_sock *inet6_csk_search_req(const struct sock *sk,
					  struct request_sock ***prevp,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet6_synq_hash(raddr, rport,
						     lopt->hash_rnd,
						     lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->ir_rmt_port == rport &&
		    req->rsk_ops->family == AF_INET6 &&
		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
			WARN_ON(req->sk != NULL);
			*prevp = prev;
			return req;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(inet6_csk_search_req);

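/*
 * Hash a new request_sock into the listener's SYN table with the given
 * timeout and update the accept-queue accounting.
 */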
void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
				    struct request_sock *req,
				    const unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet6_synq_hash(&inet_rsk(req)->ir_v6_rmt_addr,
				      inet_rsk(req)->ir_rmt_port,
				      lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet6_csk_reqsk_queue_hash_add);

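/*
 * Report the connection's peer (destination) address and port as a
 * sockaddr_in6; the received flow label is not preserved for TCP.
 */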
void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) uaddr;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_addr = sk->sk_v6_daddr;
	sin6->sin6_port = inet_sk(sk)->inet_dport;
	/* We do not store received flowlabel for TCP */
	sin6->sin6_flowinfo = 0;
	sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
						  sk->sk_bound_dev_if);
}
EXPORT_SYMBOL_GPL(inet6_csk_addr2sockaddr);

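/*
 * Thin wrappers around the generic socket dst cache: store a validated
 * route on the socket and check a cached route against its cookie.
 */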
static inline
void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
			   const struct in6_addr *daddr,
			   const struct in6_addr *saddr)
{
	__ip6_dst_store(sk, dst, daddr, saddr);
}

static inline
struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
{
	return __sk_dst_check(sk, cookie);
}

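/*
 * Return a route for this connected socket, reusing the cached dst if it
 * is still valid and otherwise performing a fresh IPv6 route lookup.
 */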
static struct dst_entry *inet6_csk_route_socket(struct sock *sk,
						struct flowi6 *fl6)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *final_p, final;
	struct dst_entry *dst;

	memset(fl6, 0, sizeof(*fl6));
	fl6->flowi6_proto = sk->sk_protocol;
	fl6->daddr = sk->sk_v6_daddr;
	fl6->saddr = np->saddr;
	fl6->flowlabel = np->flow_label;
	IP6_ECN_flow_xmit(sk, fl6->flowlabel);
	fl6->flowi6_oif = sk->sk_bound_dev_if;
	fl6->flowi6_mark = sk->sk_mark;
	fl6->fl6_sport = inet->inet_sport;
	fl6->fl6_dport = inet->inet_dport;
	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));

	final_p = fl6_update_dst(fl6, np->opt, &final);

	dst = __inet6_csk_dst_check(sk, np->dst_cookie);
	if (!dst) {
		dst = ip6_dst_lookup_flow(sk, fl6, final_p);

		if (!IS_ERR(dst))
			__inet6_csk_dst_store(sk, dst, NULL, NULL);
	}
	return dst;
}

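/*
 * Transmit path for IPv6 connection-oriented sockets: route the socket,
 * attach the dst to the skb and hand it to ip6_xmit().
 */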
int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct flowi6 fl6;
	struct dst_entry *dst;
	int res;

	dst = inet6_csk_route_socket(sk, &fl6);
	if (IS_ERR(dst)) {
		sk->sk_err_soft = -PTR_ERR(dst);
		sk->sk_route_caps = 0;
		kfree_skb(skb);
		return PTR_ERR(dst);
	}

	rcu_read_lock();
	skb_dst_set_noref(skb, dst);

	/* Restore final destination back after routing done */
	fl6.daddr = sk->sk_v6_daddr;

	res = ip6_xmit(sk, skb, &fl6, np->opt, np->tclass);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(inet6_csk_xmit);

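/*
 * Propagate a new path MTU to the socket's route and return the
 * re-looked-up dst_entry, or NULL on failure.
 */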
struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct flowi6 fl6;
	struct dst_entry *dst = inet6_csk_route_socket(sk, &fl6);

	if (IS_ERR(dst))
		return NULL;
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = inet6_csk_route_socket(sk, &fl6);
	return IS_ERR(dst) ? NULL : dst;
}
EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);