/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS TCP/IP protocol driver
 * FILE:        transport/tcp/tcp_timer.c
 * PURPOSE:     Transmission Control Protocol
 * PROGRAMMERS: Casper S. Hornstrup (chorns@users.sourceforge.net)
 * REVISIONS:
 *   CSH 15-01-2003 Imported from linux kernel 2.4.20
 */

/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <net/tcp.h>
int sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
//int sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
//int sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 = TCP_RETR1;
int sysctl_tcp_retries2 = TCP_RETR2;
int sysctl_tcp_orphan_retries;
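/*
 * For reference (default values from the Linux 2.4 headers this file was
 * imported with; the ReactOS tree may override any of them):
 * TCP_SYN_RETRIES and TCP_SYNACK_RETRIES are 5, TCP_RETR1 is 3,
 * TCP_RETR2 is 15 and TCP_KEEPALIVE_PROBES is 9. sysctl_tcp_orphan_retries
 * deliberately defaults to zero; tcp_orphan_retries() below picks a safe
 * value at run time.
 */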
static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer(unsigned long data);

//const char timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";
/*
 * Using different timers for retransmit, delayed ACKs and probes.
 * We may wish to use just one timer maintaining a list of expire
 * jiffies to optimize.
 */
void tcp_init_xmit_timers(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

    init_timer(&tp->retransmit_timer);
    tp->retransmit_timer.function = &tcp_write_timer;
    tp->retransmit_timer.data = (unsigned long)sk;
    tp->pending = 0;

    init_timer(&tp->delack_timer);
    tp->delack_timer.function = &tcp_delack_timer;
    tp->delack_timer.data = (unsigned long)sk;
    tp->ack.pending = 0;

    init_timer(&sk->timer);
    sk->timer.function = &tcp_keepalive_timer;
    sk->timer.data = (unsigned long)sk;
}
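/*
 * A note on reference counting (our summary, inferred from the call sites
 * below rather than spelled out in the original): arming one of these
 * timers takes a reference on the socket with sock_hold(), and every path
 * that fires or deletes a pending timer must drop that reference again
 * with sock_put()/__sock_put(). That is why tcp_clear_xmit_timers() only
 * calls __sock_put() when del_timer() actually removed a pending timer.
 */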
void tcp_clear_xmit_timers(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

    tp->pending = 0;
    if (timer_pending(&tp->retransmit_timer) &&
        del_timer(&tp->retransmit_timer))
        __sock_put(sk);

    tp->ack.pending = 0;
    tp->ack.blocked = 0;
    if (timer_pending(&tp->delack_timer) &&
        del_timer(&tp->delack_timer))
        __sock_put(sk);

    if (timer_pending(&sk->timer) && del_timer(&sk->timer))
        __sock_put(sk);
}
static void tcp_write_err(struct sock *sk)
{
    sk->err = sk->err_soft ? : ETIMEDOUT;
    sk->error_report(sk);

    tcp_done(sk);
    NET_INC_STATS_BH(TCPAbortOnTimeout);
}
/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criterion is still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. the number of orphaned sockets exceeds an administratively
 *    configured limit, or
 * 2. we are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    int orphans = atomic_read(&tcp_orphan_count);

    /* If the peer does not open its window for a long time, or did not
     * transmit anything for a long time, penalize it. */
    if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
        orphans <<= 1;

    /* If some dubious ICMP arrived, penalize even more. */
    if (sk->err_soft)
        orphans <<= 1;

    if (orphans >= sysctl_tcp_max_orphans ||
        (sk->wmem_queued > SOCK_MIN_SNDBUF &&
         atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
        if (net_ratelimit())
            printk(KERN_INFO "Out of socket memory\n");

        /* Catch exceptional cases, when connection requires reset.
         *   1. Last segment was sent recently. */
        if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
            /* 2. Window is closed. */
            (!tp->snd_wnd && !tp->packets_out))
            do_reset = 1;
        if (do_reset)
            tcp_send_active_reset(sk, GFP_ATOMIC);
        tcp_done(sk);
        NET_INC_STATS_BH(TCPAbortOnMemory);
        return 1;
    }
    return 0;
}
/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
    int retries = sysctl_tcp_orphan_retries; /* May be zero. */

    /* We know from an ICMP that something is wrong. */
    if (sk->err_soft && !alive)
        retries = 0;

    /* However, if the socket sent something recently, select some safe
     * number of retries. 8 corresponds to >100 seconds with the minimal
     * RTO of 200 msec. */
    if (retries == 0 && alive)
        retries = 8;
    return retries;
}
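/*
 * Checking the arithmetic in the comment above (using the 2.4 value
 * TCP_RTO_MIN = HZ/5, i.e. 200 msec): with the timeout doubling on every
 * retry, giving up after the 8th retransmission means waiting through
 * nine successive timeouts,
 *   0.2 * (1 + 2 + 4 + ... + 256) = 0.2 * 511 ~= 102 seconds,
 * which is indeed just over 100 seconds.
 */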
/* A write timeout has occurred. Process the after-effects. */
static int tcp_write_timeout(struct sock *sk)
{
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    int retry_until;

    if ((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
        if (tp->retransmits)
            dst_negative_advice(&sk->dst_cache);
        retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
    } else {
        if (tp->retransmits >= sysctl_tcp_retries1) {
            /* NOTE: draft-ietf-tcpimpl-pmtud-01.txt requires PMTU black
               hole detection. :-(

               This would be the place to implement it. It is not
               implemented, and I do not want to implement it. It is
               disgusting and does not work in any case. Let me quote
               the same draft, which requires us to implement this:

   "The one security concern raised by this memo is that ICMP black holes
   are often caused by over-zealous security administrators who block
   all ICMP messages. It is vitally important that those who design and
   deploy security systems understand the impact of strict filtering on
   upper-layer protocols. The safest web site in the world is worthless
   if most TCP implementations cannot transfer data from it. It would
   be far nicer to have all of the black holes fixed rather than fixing
   all of the TCP implementations."

               Golden words :-).
             */
            dst_negative_advice(&sk->dst_cache);
        }

        retry_until = sysctl_tcp_retries2;
        if (sk->dead) {
            int alive = (tp->rto < TCP_RTO_MAX);

            retry_until = tcp_orphan_retries(sk, alive);

            if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until))
                return 1;
        }
    }

    if (tp->retransmits >= retry_until) {
        /* Has it gone just too far? */
        tcp_write_err(sk);
        return 1;
    }
    return 0;
}
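/*
 * For orientation (defaults from the 2.4 headers; all of them are tunable
 * via sysctl): crossing sysctl_tcp_retries1 = TCP_RETR1 = 3 only triggers
 * dst_negative_advice() to re-check the route, while the connection keeps
 * retrying until sysctl_tcp_retries2 = TCP_RETR2 = 15. With the RTO
 * doubling and clamping at TCP_RTO_MAX (120 s), 15 retransmissions amount
 * to roughly 13 to 30 minutes depending on the initial RTO.
 */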
static void tcp_delack_timer(unsigned long data)
{
    struct sock *sk = (struct sock *)data;
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

    bh_lock_sock(sk);
    if (sk->lock.users) {
        /* Try again later. */
        tp->ack.blocked = 1;
        NET_INC_STATS_BH(DelayedACKLocked);
        if (!mod_timer(&tp->delack_timer, jiffies + TCP_DELACK_MIN))
            sock_hold(sk);
        goto out_unlock;
    }

    tcp_mem_reclaim(sk);

    if (sk->state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
        goto out;

    if ((long)(tp->ack.timeout - jiffies) > 0) {
        if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
            sock_hold(sk);
        goto out;
    }
    tp->ack.pending &= ~TCP_ACK_TIMER;

    if (skb_queue_len(&tp->ucopy.prequeue)) {
        struct sk_buff *skb;

        net_statistics[smp_processor_id()*2].TCPSchedulerFailed += skb_queue_len(&tp->ucopy.prequeue);

        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
            sk->backlog_rcv(sk, skb);

        tp->ucopy.memory = 0;
    }

    if (tcp_ack_scheduled(tp)) {
        if (!tp->ack.pingpong) {
            /* Delayed ACK missed: inflate ATO. */
            tp->ack.ato = min(tp->ack.ato << 1, tp->rto);
        } else {
            /* Delayed ACK missed: leave pingpong mode and
             * deflate ATO.
             */
            tp->ack.pingpong = 0;
            tp->ack.ato = TCP_ATO_MIN;
        }
        tcp_send_ack(sk);
        NET_INC_STATS_BH(DelayedACKs);
    }
    TCP_CHECK_TIMER(sk);

out:
    if (tcp_memory_pressure)
        tcp_mem_reclaim(sk);
out_unlock:
    bh_unlock_sock(sk);
    sock_put(sk);
}
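/*
 * A note on the ATO dance above (wording ours): in pingpong (interactive)
 * mode a missed delayed ACK means the peer has stopped echoing our data,
 * so we fall back to quick ACKs with the minimal ATO. Outside pingpong
 * mode the ATO doubles on every miss, but never grows past the
 * retransmission timeout tp->rto.
 */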
static void tcp_probe_timer(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    int max_probes;

    if (tp->packets_out || !tp->send_head) {
        tp->probes_out = 0;
        return;
    }

    /* *WARNING* RFC 1122 forbids this
     *
     * It doesn't AFAIK, because we kill the retransmit timer -AK
     *
     * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
     * this behaviour in Solaris down as a bug fix. [AC]
     *
     * Let me explain. probes_out is zeroed by incoming ACKs, even if
     * they advertise a zero window. Hence, the connection is killed
     * only if we received no ACKs for the normal connection timeout.
     * It is not killed merely because the window stays zero for some
     * time; the window may be zero until armageddon and even later.
     * We are in full accordance with the RFCs, except that the probe
     * timer combines both the retransmission timeout and the probe
     * timeout in one bottle.				--ANK
     */
    max_probes = sysctl_tcp_retries2;

    if (sk->dead) {
        int alive = ((tp->rto << tp->backoff) < TCP_RTO_MAX);

        max_probes = tcp_orphan_retries(sk, alive);

        if (tcp_out_of_resources(sk, alive || tp->probes_out <= max_probes))
            return;
    }

    if (tp->probes_out > max_probes) {
        tcp_write_err(sk);
    } else {
        /* Only send another probe if we didn't close things up. */
        tcp_send_probe0(sk);
    }
}
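/*
 * The zero-window probes themselves come from tcp_send_probe0() (defined
 * in the output path, not in this file), which backs tp->backoff off
 * exponentially much like the retransmit timer does; "alive" above thus
 * means the backed-off RTO has not yet hit the TCP_RTO_MAX clamp.
 */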
/*
 *	The TCP retransmit timer.
 */

static void tcp_retransmit_timer(struct sock *sk)
{
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

    if (tp->packets_out == 0)
        goto out;

    BUG_TRAP(!skb_queue_empty(&sk->write_queue));

    if (tp->snd_wnd == 0 && !sk->dead &&
        !((1 << sk->state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
        /* The receiver dastardly shrinks the window. Our retransmits
         * become zero-window probes, but we should not time out this
         * connection. If the socket is an orphan, time it out;
         * we cannot allow such beasts to hang infinitely.
         */
#ifdef TCP_DEBUG
        if (net_ratelimit())
            printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
                   NIPQUAD(sk->daddr), htons(sk->dport), sk->num,
                   tp->snd_una, tp->snd_nxt);
#endif
        if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
            tcp_write_err(sk);
            goto out;
        }
        tcp_enter_loss(sk, 0);
        tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
        __sk_dst_reset(sk);
        goto out_reset_timer;
    }

    if (tcp_write_timeout(sk))
        goto out;

    if (tp->retransmits == 0) {
        if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
            if (tp->sack_ok) {
                if (tp->ca_state == TCP_CA_Recovery)
                    NET_INC_STATS_BH(TCPSackRecoveryFail);
                else
                    NET_INC_STATS_BH(TCPSackFailures);
            } else {
                if (tp->ca_state == TCP_CA_Recovery)
                    NET_INC_STATS_BH(TCPRenoRecoveryFail);
                else
                    NET_INC_STATS_BH(TCPRenoFailures);
            }
        } else if (tp->ca_state == TCP_CA_Loss) {
            NET_INC_STATS_BH(TCPLossFailures);
        } else
            NET_INC_STATS_BH(TCPTimeouts);
    }

    tcp_enter_loss(sk, 0);

    if (tcp_retransmit_skb(sk, skb_peek(&sk->write_queue)) > 0) {
        /* Retransmission failed because of local congestion;
         * do not back off.
         */
        if (!tp->retransmits)
            tp->retransmits = 1;
        tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
                             min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
        goto out;
    }

    /* Increase the timeout each time we retransmit.  Note that
     * we do not increase the rtt estimate.  rto is initialized
     * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
     * that doubling rto each time is the least we can get away with.
     * In KA9Q, Karn uses this for the first few times, and then
     * goes to quadratic.  NetBSD doubles, but only goes up to *64,
     * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
     * defined in the protocol as the maximum possible RTT.  I guess
     * we'll have to use something other than TCP to talk to the
     * University of Mars.
     *
     * PAWS allows us longer timeouts and large windows, so once
     * implemented, ftp to mars will work nicely. We will have to fix
     * the 120 second clamps though!
     */
    tp->backoff++;
    tp->retransmits++;

out_reset_timer:
    tp->rto = min(tp->rto << 1, TCP_RTO_MAX);
    tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
    if (tp->retransmits > sysctl_tcp_retries1)
        __sk_dst_reset(sk);

out:;
}
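/*
 * A worked example of the clamp above (our numbers, assuming an initial
 * rto of 3 s, the TCP_TIMEOUT_INIT default): successive timeouts run
 * 3, 6, 12, 24, 48, 96, 120, 120, ... seconds, since tp->rto sticks at
 * TCP_RTO_MAX = 120 s once the doubling reaches the clamp.
 */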
static void tcp_write_timer(unsigned long data)
{
    struct sock *sk = (struct sock *)data;
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    int event;

    bh_lock_sock(sk);
    if (sk->lock.users) {
        /* Try again later */
        if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
            sock_hold(sk);
        goto out_unlock;
    }

    if (sk->state == TCP_CLOSE || !tp->pending)
        goto out;

    if ((long)(tp->timeout - jiffies) > 0) {
        if (!mod_timer(&tp->retransmit_timer, tp->timeout))
            sock_hold(sk);
        goto out;
    }

    event = tp->pending;
    tp->pending = 0;

    switch (event) {
    case TCP_TIME_RETRANS:
        tcp_retransmit_timer(sk);
        break;
    case TCP_TIME_PROBE0:
        tcp_probe_timer(sk);
        break;
    }
    TCP_CHECK_TIMER(sk);

out:
    tcp_mem_reclaim(sk);
out_unlock:
    bh_unlock_sock(sk);
    sock_put(sk);
}
/*
 *	Timer for listening sockets
 */

static void tcp_synack_timer(struct sock *sk)
{
    struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
    struct tcp_listen_opt *lopt = tp->listen_opt;
    int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
    int thresh = max_retries;
    unsigned long now = jiffies;
    struct open_request **reqp, *req;
    int i, budget;

    if (lopt == NULL || lopt->qlen == 0)
        return;

    /* Normally all the openreqs are young and become mature
     * (i.e. converted to an established socket) within the first
     * timeout. If the SYN-ACK was not acknowledged for 3 seconds, it
     * means one of the following: the SYN-ACK was lost, the ACK was
     * lost, the rtt is high, or nobody planned to ack at all (i.e. a
     * synflood). When the server is a bit loaded, the queue fills with
     * old open requests, reducing its effective size. When the server
     * is well loaded, the queue size reduces to zero after several
     * minutes of work. That is not a synflood; it is normal operation.
     * The solution is to prune entries that are too old, overriding
     * the normal timeout, when the situation becomes dangerous.
     *
     * Essentially, we reserve half of the room for young embryos
     * and abort old ones without pity if they are about to clog
     * our table.
     */
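    /*
     * A worked example of the check below (our numbers): with
     * sysctl_tcp_synack_retries = 5 and the queue more than half full,
     * thresh drops by one for every doubling of qlen beyond
     * 2*qlen_young, bottoming out at 2, so old un-ACKed entries get as
     * few as 2 SYN-ACK retransmissions while fresh ones keep the full
     * retry budget.
     */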
    if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
        int young = (lopt->qlen_young << 1);

        while (thresh > 2) {
            if (lopt->qlen < young)
                break;
            thresh--;
            young <<= 1;
        }
    }

    if (tp->defer_accept)
        max_retries = tp->defer_accept;

    budget = 2*(TCP_SYNQ_HSIZE/(TCP_TIMEOUT_INIT/TCP_SYNQ_INTERVAL));
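    /*
     * The budget arithmetic (our reading): covering all TCP_SYNQ_HSIZE
     * buckets at this rate takes (TCP_TIMEOUT_INIT/TCP_SYNQ_INTERVAL)/2
     * timer ticks of TCP_SYNQ_INTERVAL each, i.e. the whole hash table
     * is swept once every TCP_TIMEOUT_INIT/2 = 1.5 seconds.
     */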
    i = lopt->clock_hand;

    do {
        reqp = &lopt->syn_table[i];
        while ((req = *reqp) != NULL) {
            if ((long)(now - req->expires) >= 0) {
                if ((req->retrans < thresh ||
                     (req->acked && req->retrans < max_retries))
                    && !req->class->rtx_syn_ack(sk, req, NULL)) {
                    unsigned long timeo;

                    if (req->retrans++ == 0)
                        lopt->qlen_young--;
                    timeo = min((TCP_TIMEOUT_INIT << req->retrans),
                                TCP_RTO_MAX);
                    req->expires = now + timeo;
                    reqp = &req->dl_next;
                    continue;
                }

                /* Drop this request */
                write_lock(&tp->syn_wait_lock);
                *reqp = req->dl_next;
                write_unlock(&tp->syn_wait_lock);
                lopt->qlen--;
                if (req->retrans == 0)
                    lopt->qlen_young--;
                tcp_openreq_free(req);
                continue;
            }
            reqp = &req->dl_next;
        }

        i = (i + 1) & (TCP_SYNQ_HSIZE - 1);

    } while (--budget > 0);

    lopt->clock_hand = i;

    if (lopt->qlen)
        tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
}
void tcp_delete_keepalive_timer(struct sock *sk)
{
    if (timer_pending(&sk->timer) && del_timer(&sk->timer))
        __sock_put(sk);
}

void tcp_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
    if (!mod_timer(&sk->timer, jiffies + len))
        sock_hold(sk);
}

void tcp_set_keepalive(struct sock *sk, int val)
{
    if ((1 << sk->state) & (TCPF_CLOSE | TCPF_LISTEN))
        return;

    if (val && !sk->keepopen)
        tcp_reset_keepalive_timer(sk, keepalive_time_when(&sk->tp_pinfo.af_tcp));
    else if (!val)
        tcp_delete_keepalive_timer(sk);
}
static void tcp_keepalive_timer(unsigned long data)
{
    struct sock *sk = (struct sock *)data;
    struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
    __u32 elapsed;

    /* Only process if socket is not in use. */
    bh_lock_sock(sk);
    if (sk->lock.users) {
        /* Try again later. */
        tcp_reset_keepalive_timer(sk, HZ/20);
        goto out;
    }

    if (sk->state == TCP_LISTEN) {
        tcp_synack_timer(sk);
        goto out;
    }

    if (sk->state == TCP_FIN_WAIT2 && sk->dead) {
        if (tp->linger2 >= 0) {
            int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;

            if (tmo > 0) {
                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                goto out;
            }
        }
        tcp_send_active_reset(sk, GFP_ATOMIC);
        goto death;
    }

    if (!sk->keepopen || sk->state == TCP_CLOSE)
        goto out;

    elapsed = keepalive_time_when(tp);

    /* It is alive without keepalive 8) */
    if (tp->packets_out || tp->send_head)
        goto resched;

    elapsed = tcp_time_stamp - tp->rcv_tstamp;

    if (elapsed >= keepalive_time_when(tp)) {
        if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) ||
            (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) {
            tcp_send_active_reset(sk, GFP_ATOMIC);
            tcp_write_err(sk);
            goto out;
        }
        if (tcp_write_wakeup(sk) <= 0) {
            tp->probes_out++;
            elapsed = keepalive_intvl_when(tp);
        } else {
            /* If the keepalive was lost due to local congestion,
             * try harder.
             */
            elapsed = TCP_RESOURCE_PROBE_INTERVAL;
        }
    } else {
        /* It is tp->rcv_tstamp + keepalive_time_when(tp). */
        elapsed = keepalive_time_when(tp) - elapsed;
    }

    TCP_CHECK_TIMER(sk);
    tcp_mem_reclaim(sk);

resched:
    tcp_reset_keepalive_timer(sk, elapsed);
    goto out;

death:
    tcp_done(sk);

out:
    bh_unlock_sock(sk);
    sock_put(sk);
}
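/*
 * A worked example of the schedule above (our numbers, using the Linux
 * 2.4 defaults of keepalive_time = 7200 s, keepalive_intvl = 75 s and
 * keepalive_probes = 9): after two hours of idleness the first probe goes
 * out, further probes follow every 75 seconds, and if none of the nine is
 * ACKed the connection is reset roughly 7200 + 9*75 ~= 7875 seconds
 * (about 2 h 11 min) after the last received segment.
 */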