#include <linux/module.h>
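
/*
 * Default range of local ports handed out when a socket binds to
 * port 0; runtime-tunable via the ip_local_port_range sysctl.
 */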
struct local_ports sysctl_local_ports __read_mostly = {
	.lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock),
	.range = { 32768, 61000 },
};
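
/* From inet_csk_bind_conflict(): scan the bind bucket's owners and
 * decide whether this socket may share the local port, taking
 * SO_REUSEADDR, the bound device and the bound address into account.
 */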
	int reuse = sk->sk_reuse;

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
			if (!relax && reuse && sk2->sk_reuse &&
			    sk2->sk_state != TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);

				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
				    sk2_rcv_saddr == sk_rcv_saddr(sk))
					break;
			}
		}
	}
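
/* Obtain a reference to a local port for the given sock;
 * if snum is zero it means select any available local port.
 */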
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret, attempts = 5;
	struct net *net = sock_net(sk);
	int smallest_size = -1, smallest_rover;

	local_bh_disable();
	if (!snum) {
		int remaining, rover, low, high;

again:
		inet_get_local_port_range(&low, &high);
		remaining = (high - low) + 1;
		smallest_rover = rover = net_random() % remaining + low;

		smallest_size = -1;
		do {
			if (inet_is_reserved_local_port(rover))
				goto next_nolock;
			head = &hashinfo->bhash[inet_bhashfn(net, rover,
					hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (net_eq(ib_net(tb), net) && tb->port == rover) {
					if (tb->fastreuse > 0 &&
					    sk->sk_reuse &&
					    sk->sk_state != TCP_LISTEN &&
					    (tb->num_owners < smallest_size || smallest_size == -1)) {
						smallest_size = tb->num_owners;
						smallest_rover = rover;
						if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
						    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
							snum = smallest_rover;
							goto tb_found;
						}
					}
					if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false)) {
						snum = rover;
						goto tb_found;
					}
					goto next;
				}
			break;
		next:
			spin_unlock(&head->lock);
		next_nolock:
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);
		/* Exhausted the local port range during search? */
		ret = 1;
		if (remaining <= 0) {
			if (smallest_size != -1) {
				snum = smallest_rover;
				goto have_snum;
			}
			goto fail;
		}
		/* OK, here is the one we will use. */
		snum = rover;
	} else {
have_snum:
		head = &hashinfo->bhash[inet_bhashfn(net, snum,
				hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (net_eq(ib_net(tb), net) && tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse == SK_FORCE_REUSE)
			goto success;

		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
		    smallest_size == -1) {
			goto success;
		} else {
			ret = 1;
			if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true)) {
				if (sk->sk_reuse && sk->sk_state != TCP_LISTEN &&
				    smallest_size != -1 && --attempts >= 0) {
					spin_unlock(&head->lock);
					goto again;
				}
				goto fail_unlock;
			}
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep,
					net, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}
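
/*
 * Wait for an incoming connection, avoid race conditions. This must be
 * called with the socket locked.
 */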
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	/* ... */
		err = sock_intr_errno(timeo);
	/* ... */
}
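
/* From inet_csk_accept(): block until a connection is ready, then pull
 * the request off the accept queue.
 */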
	if (reqsk_queue_empty(queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}
	req = reqsk_queue_remove(queue);
	newsk = req->sk;

	sk_acceptq_removed(sk);
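
/* Register the three connection timers: retransmit, delayed ACK and
 * keepalive; all of them take the socket as their timer argument.
 */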
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	setup_timer(&icsk->icsk_retransmit_timer, retransmit_handler,
			(unsigned long)sk);
	setup_timer(&icsk->icsk_delack_timer, delack_handler,
			(unsigned long)sk);
	setup_timer(&sk->sk_timer, keepalive_handler, (unsigned long)sk);
	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
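
/* From inet_csk_route_req(): build the flow used to route the SYN-ACK
 * back to the peer, honoring any source-routing option on the request.
 */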
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options_rcu *opt = inet_rsk(req)->opt;
	struct net *net = sock_net(sk);
	int flags = inet_sk_flowi_flags(sk);

	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol,
			   flags,
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
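
/* From inet_csk_route_child_sock(): set up the child socket's flow,
 * cached in its inet cork, and classify it for security modules.
 */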
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct inet_sock *newinet = inet_sk(newsk);
	struct ip_options_rcu *opt;
	struct net *net = sock_net(sk);
	struct flowi4 *fl4;
	struct rtable *rt;

	fl4 = &newinet->cork.fl.u.ip4;

	rcu_read_lock();
	opt = rcu_dereference(newinet->inet_opt);
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   sk->sk_protocol, inet_sk_flowi_flags(sk),
			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
			   ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
	security_req_classify_flow(req, flowi4_to_flowi(fl4));
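
/* Hash the peer's address and port into the listener's SYN queue. */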
static u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
			  const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) &
	       (synq_hsize - 1);
}
#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif
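
/* From inet_csk_search_req(): walk the SYN-queue hash chain looking
 * for the request that matches the peer's address and port.
 */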
	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
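
/* Hash a new request into the listener's SYN queue and account for it. */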
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
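
/* Decide when to expire the request and when to resend SYN-ACK */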
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
				  const int max_retries,
				  const u8 rskq_defer_accept,
				  int *expire, int *resend)
{
	if (!rskq_defer_accept) {
		*expire = req->retrans >= thresh;
		*resend = 1;
		return;
	}
	*expire = req->retrans >= thresh &&
		  (!inet_rsk(req)->acked || req->retrans >= max_retries);
	/* Do not resend while waiting for data after ACK,
	 * start to resend on end of deferring period to give
	 * last chance for data or ACK to create established socket.
	 */
	*resend = !inet_rsk(req)->acked ||
		  req->retrans >= rskq_defer_accept - 1;
}
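
/* From inet_csk_reqsk_queue_prune(): walk part of the SYN queue each
 * run, resending SYN-ACKs that are due and dropping requests that have
 * exceeded their retransmit threshold.
 */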
void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	/* ... */
	int thresh = max_retries;
	/* ... */

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* ... */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	/* ... */
	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				int expire = 0, resend = 0;

				syn_ack_recalc(req, thresh, max_retries,
					       queue->rskq_defer_accept,
					       &expire, &resend);
				req->rsk_ops->syn_ack_timeout(parent, req);
				if (!expire &&
				    (!resend ||
				     !req->rsk_ops->rtx_syn_ack(parent, req, NULL) ||
				     inet_rsk(req)->acked)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);
	} while (--budget > 0);
	/* ... */
}
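
/* From inet_csk_clone_lock(): copy the addressing information from the
 * connection request into the newly cloned child socket.
 */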
	inet_sk(newsk)->inet_dport = inet_rsk(req)->rmt_port;
	inet_sk(newsk)->inet_num = ntohs(inet_rsk(req)->loc_port);
	inet_sk(newsk)->inet_sport = inet_rsk(req)->loc_port;

	/* ... */
	security_inet_csk_clone(newsk, req);
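
/* From inet_csk_destroy_sock(): final teardown; at this point there
 * should be no remaining user references to the socket.
 */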
	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	percpu_counter_dec(sk->sk_prot->orphan_count);
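
/* From inet_csk_listen_start(): re-check the port while moving the
 * socket into TCP_LISTEN, then hash it into the listening table.
 */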
	inet_csk_delack_init(sk);

	/* ... */
	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
		inet->inet_sport = htons(inet->inet_num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}
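
/*
 * This routine closes sockets which have been at least partially
 * accepted, but not yet accepted.
 */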
	acc_req = reqsk_queue_yank_acceptq(queue);

	/* ... */
	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;
		/* ... */
		percpu_counter_inc(sk->sk_prot->orphan_count);

		if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->listener) {
			BUG_ON(tcp_sk(child)->fastopen_rsk != req);
			/* ... */
			tcp_sk(child)->fastopen_rsk = NULL;
			sock_put(sk);
		}
		/* ... */
		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	if (queue->fastopenq != NULL) {
		/* Free all the reqs queued in rskq_rst_head. */
		spin_lock_bh(&queue->fastopenq->lock);
		acc_req = queue->fastopenq->rskq_rst_head;
		queue->fastopenq->rskq_rst_head = NULL;
		spin_unlock_bh(&queue->fastopenq->lock);
		while ((req = acc_req) != NULL) {
			acc_req = req->dl_next;
			__reqsk_free(req);
		}
	}
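
/* Fill a sockaddr_in with the connected peer's address and port. */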
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->inet_daddr;
	sin->sin_port = inet->inet_dport;
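
/* Dispatch getsockopt() to the address family's compat handler when
 * one is provided, else fall back to the regular handler.
 */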
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}
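
/* Same dispatch as above for the setsockopt() side. */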
int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, unsigned int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}
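
/* Recreate the socket's cached route from its flow, honoring any
 * strict source-routing option on the first hop.
 */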
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;
	struct flowi4 *fl4;
	struct rtable *rt;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	fl4 = &fl->u.ip4;
	rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
				   inet->inet_saddr, inet->inet_dport,
				   inet->inet_sport, sk->sk_protocol,
				   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
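
/* Refresh the cached route, rebuilding it if necessary, and propagate
 * the new path MTU to it.
 */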
struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	struct inet_sock *inet = inet_sk(sk);

	if (!dst) {
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
		if (!dst)
			goto out;
	}
	dst->ops->update_pmtu(dst, sk, NULL, mtu);

	dst = __sk_dst_check(sk, 0);
	if (!dst)
		dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
	return dst;
}