/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS TCP/IP protocol driver
 * FILE:        transport/tcp/tcp_timer.c
 * PURPOSE:     Transmission Control Protocol
 * PROGRAMMERS: Casper S. Hornstrup (chorns@users.sourceforge.net)
 * REVISIONS:
 *   CSH 15-01-2003 Imported from linux kernel 2.4.20
 */

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:     $Id$
 *
 * Authors:     Ross Biro, <bir7@leland.Stanford.Edu>
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

#if 0
#include <net/tcp.h>
#else
#include "linux.h"
#include "tcpcore.h"
#endif

int sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
int sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
//int sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
int sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
//int sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
int sysctl_tcp_retries1 = TCP_RETR1;
int sysctl_tcp_retries2 = TCP_RETR2;
int sysctl_tcp_orphan_retries;

static void tcp_write_timer(unsigned long);
static void tcp_delack_timer(unsigned long);
static void tcp_keepalive_timer (unsigned long data);

//const char timer_bug_msg[] = KERN_DEBUG "tcpbug: unknown timer value\n";

/*
 * Using different timers for retransmit, delayed acks and probes.
 * We may wish to use just one timer maintaining a list of expiry
 * jiffies to optimize.
 */
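/* A minimal sketch of that alternative (illustrative only, not part of
 * the 2.4.20 import): a single timer armed for the earliest pending
 * expiry, dispatching on the event type when it fires.  The names below
 * are hypothetical. */
#if 0
struct tcp_timer_entry {
        int event;                      /* TCP_TIME_RETRANS, TCP_TIME_PROBE0, ... */
        unsigned long expires;          /* absolute expiry, in jiffies */
        struct tcp_timer_entry *next;   /* kept sorted by 'expires' */
};
#endif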

void tcp_init_xmit_timers(struct sock *sk)
{
#if 0
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

        init_timer(&tp->retransmit_timer);
        tp->retransmit_timer.function=&tcp_write_timer;
        tp->retransmit_timer.data = (unsigned long) sk;
        tp->pending = 0;

        init_timer(&tp->delack_timer);
        tp->delack_timer.function=&tcp_delack_timer;
        tp->delack_timer.data = (unsigned long) sk;
        tp->ack.pending = 0;

        init_timer(&sk->timer);
        sk->timer.function=&tcp_keepalive_timer;
        sk->timer.data = (unsigned long) sk;
#endif
}

void tcp_clear_xmit_timers(struct sock *sk)
{
#if 0
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

        tp->pending = 0;
        if (timer_pending(&tp->retransmit_timer) &&
            del_timer(&tp->retransmit_timer))
                __sock_put(sk);

        tp->ack.pending = 0;
        tp->ack.blocked = 0;
        if (timer_pending(&tp->delack_timer) &&
            del_timer(&tp->delack_timer))
                __sock_put(sk);

        if(timer_pending(&sk->timer) && del_timer(&sk->timer))
                __sock_put(sk);
#endif
}

static void tcp_write_err(struct sock *sk)
{
#if 0
        sk->err = sk->err_soft ? : ETIMEDOUT;
        sk->error_report(sk);

        tcp_done(sk);
        NET_INC_STATS_BH(TCPAbortOnTimeout);
#endif
}

/* Do not allow orphaned sockets to eat all our resources.
 * This is a direct violation of the TCP specs, but it is required
 * to prevent DoS attacks. It is called when a retransmission timeout
 * or zero probe timeout occurs on an orphaned socket.
 *
 * The criterion is still not confirmed experimentally and may change.
 * We kill the socket if:
 * 1. The number of orphaned sockets exceeds an administratively
 *    configured limit.
 * 2. We are under strong memory pressure.
 */
static int tcp_out_of_resources(struct sock *sk, int do_reset)
{
#if 0
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
        int orphans = atomic_read(&tcp_orphan_count);

        /* If the peer does not open its window for a long time, or did not
         * transmit anything for a long time, penalize it. */
        if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                orphans <<= 1;

        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->err_soft)
                orphans <<= 1;
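        /* Note: with both penalties applied the orphan count is weighted
         * by a factor of four, so a penalized socket trips the
         * sysctl_tcp_max_orphans limit at a quarter of its nominal value. */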

        if (orphans >= sysctl_tcp_max_orphans ||
            (sk->wmem_queued > SOCK_MIN_SNDBUF &&
             atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
                if (net_ratelimit())
                        printk(KERN_INFO "Out of socket memory\n");

                /* Catch exceptional cases, when connection requires reset.
                 *      1. Last segment was sent recently. */
                if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /*  2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = 1;
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
                NET_INC_STATS_BH(TCPAbortOnMemory);
                return 1;
        }
        return 0;
#else
        return 0;
#endif
}

/* Calculate the maximal number of retries on an orphaned socket. */
static int tcp_orphan_retries(struct sock *sk, int alive)
{
#if 0
        int retries = sysctl_tcp_orphan_retries; /* May be zero. */

        /* We know from an ICMP that something is wrong. */
        if (sk->err_soft && !alive)
                retries = 0;

        /* However, if the socket sent something recently, select some safe
         * number of retries. 8 corresponds to >100 seconds with a minimal
         * RTO of 200 msec. */
        if (retries == 0 && alive)
                retries = 8;
        return retries;
#else
        return 0;
#endif
}
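
/* Illustrative helper (not part of the 2.4.20 import): total time spent
 * across an initial transmission plus N retries under pure exponential
 * backoff from a minimal RTO of 200 msec.  For N = 8 this sums to
 * 0.2 * (2^9 - 1) = 102.2 seconds, which is the ">100 seconds" referred
 * to above.  No TCP_RTO_MAX clamp is needed here since the largest
 * single timeout, 51.2 sec, stays below it. */
#if 0
static unsigned long tcp_backoff_total_msec(unsigned int retries)
{
        unsigned long rto = 200;        /* minimal RTO, in msec */
        unsigned long total = 0;
        unsigned int i;

        for (i = 0; i <= retries; i++) {
                total += rto;           /* wait out this timeout... */
                rto <<= 1;              /* ...then double it */
        }
        return total;                   /* retries == 8 -> 102200 msec */
}
#endif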

/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
#if 0
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
        int retry_until;

        if ((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV)) {
                if (tp->retransmits)
                        dst_negative_advice(&sk->dst_cache);
                retry_until = tp->syn_retries ? : sysctl_tcp_syn_retries;
        } else {
                if (tp->retransmits >= sysctl_tcp_retries1) {
                        /* NOTE. draft-ietf-tcpimpl-pmtud-01.txt requires pmtu black
                           hole detection. :-(

                           This is the place to implement it. It is not implemented,
                           and I do not want to implement it. It is disgusting. It
                           does not work in any case. Let me cite the same draft,
                           which requires us to implement this:

   "The one security concern raised by this memo is that ICMP black holes
   are often caused by over-zealous security administrators who block
   all ICMP messages.  It is vitally important that those who design and
   deploy security systems understand the impact of strict filtering on
   upper-layer protocols.  The safest web site in the world is worthless
   if most TCP implementations cannot transfer data from it.  It would
   be far nicer to have all of the black holes fixed rather than fixing
   all of the TCP implementations."

                           Golden words :-).
                   */

                        dst_negative_advice(&sk->dst_cache);
                }

                retry_until = sysctl_tcp_retries2;
                if (sk->dead) {
                        int alive = (tp->rto < TCP_RTO_MAX);

                        retry_until = tcp_orphan_retries(sk, alive);

                        if (tcp_out_of_resources(sk, alive || tp->retransmits < retry_until))
                                return 1;
                }
        }

        if (tp->retransmits >= retry_until) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
        }
        return 0;
#else
        return 0;
#endif
}

static void tcp_delack_timer(unsigned long data)
{
#if 0
        struct sock *sk = (struct sock*)data;
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

        bh_lock_sock(sk);
        if (sk->lock.users) {
                /* Try again later. */
                tp->ack.blocked = 1;
                NET_INC_STATS_BH(DelayedACKLocked);
                if (!mod_timer(&tp->delack_timer, jiffies + TCP_DELACK_MIN))
                        sock_hold(sk);
                goto out_unlock;
        }

        tcp_mem_reclaim(sk);

        if (sk->state == TCP_CLOSE || !(tp->ack.pending&TCP_ACK_TIMER))
                goto out;

        if ((long)(tp->ack.timeout - jiffies) > 0) {
                if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
                        sock_hold(sk);
                goto out;
        }
        tp->ack.pending &= ~TCP_ACK_TIMER;

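        /* If segments are still sitting on the prequeue, the user task
         * never got around to processing them; drain them through
         * backlog_rcv here so the delayed-ACK decision below sees an
         * up-to-date receive state. */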
        if (skb_queue_len(&tp->ucopy.prequeue)) {
                struct sk_buff *skb;

                net_statistics[smp_processor_id()*2].TCPSchedulerFailed += skb_queue_len(&tp->ucopy.prequeue);

                while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk->backlog_rcv(sk, skb);

                tp->ucopy.memory = 0;
        }

        if (tcp_ack_scheduled(tp)) {
                if (!tp->ack.pingpong) {
                        /* Delayed ACK missed: inflate ATO. */
                        tp->ack.ato = min(tp->ack.ato << 1, tp->rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        tp->ack.pingpong = 0;
                        tp->ack.ato = TCP_ATO_MIN;
                }
                tcp_send_ack(sk);
                NET_INC_STATS_BH(DelayedACKs);
        }
        TCP_CHECK_TIMER(sk);

out:
        if (tcp_memory_pressure)
                tcp_mem_reclaim(sk);
out_unlock:
        bh_unlock_sock(sk);
        sock_put(sk);
#endif
}

static void tcp_probe_timer(struct sock *sk)
{
#if 0
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
        int max_probes;

        if (tp->packets_out || !tp->send_head) {
                tp->probes_out = 0;
                return;
        }

        /* *WARNING* RFC 1122 forbids this
         *
         * It doesn't AFAIK, because we kill the retransmit timer -AK
         *
         * FIXME: We ought not to do it, Solaris 2.5 actually has fixing
         * this behaviour in Solaris down as a bug fix. [AC]
         *
         * Let me explain. probes_out is zeroed by incoming ACKs, even
         * ones that advertise a zero window. Hence, the connection is
         * killed only if we received no ACKs for the normal connection
         * timeout. It is not killed merely because the window stays zero
         * for some time; the window may stay zero until armageddon and
         * even later. We are in full accordance with the RFCs, only the
         * probe timer combines both retransmission timeout and probe
         * timeout in one bottle.                               --ANK
         */
        max_probes = sysctl_tcp_retries2;

        if (sk->dead) {
                int alive = ((tp->rto<<tp->backoff) < TCP_RTO_MAX);

                max_probes = tcp_orphan_retries(sk, alive);

                if (tcp_out_of_resources(sk, alive || tp->probes_out <= max_probes))
                        return;
        }

        if (tp->probes_out > max_probes) {
                tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
                tcp_send_probe0(sk);
        }
#endif
}

/*
 *      The TCP retransmit timer.
 */

static void tcp_retransmit_timer(struct sock *sk)
{
#if 0
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

        if (tp->packets_out == 0)
                goto out;

        BUG_TRAP(!skb_queue_empty(&sk->write_queue));

        if (tp->snd_wnd == 0 && !sk->dead &&
            !((1<<sk->state)&(TCPF_SYN_SENT|TCPF_SYN_RECV))) {
                /* The receiver dastardly shrinks the window. Our retransmits
                 * become zero probes, but we should not time out this
                 * connection. If the socket is an orphan, time it out;
                 * we cannot allow such beasts to hang around indefinitely.
                 */
#ifdef TCP_DEBUG
                if (net_ratelimit())
                        printk(KERN_DEBUG "TCP: Treason uncloaked! Peer %u.%u.%u.%u:%u/%u shrinks window %u:%u. Repaired.\n",
                               NIPQUAD(sk->daddr), htons(sk->dport), sk->num,
                               tp->snd_una, tp->snd_nxt);
#endif
                if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
                tcp_enter_loss(sk, 0);
                tcp_retransmit_skb(sk, skb_peek(&sk->write_queue));
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }

        if (tcp_write_timeout(sk))
                goto out;

        if (tp->retransmits == 0) {
                if (tp->ca_state == TCP_CA_Disorder || tp->ca_state == TCP_CA_Recovery) {
                        if (tp->sack_ok) {
                                if (tp->ca_state == TCP_CA_Recovery)
                                        NET_INC_STATS_BH(TCPSackRecoveryFail);
                                else
                                        NET_INC_STATS_BH(TCPSackFailures);
                        } else {
                                if (tp->ca_state == TCP_CA_Recovery)
                                        NET_INC_STATS_BH(TCPRenoRecoveryFail);
                                else
                                        NET_INC_STATS_BH(TCPRenoFailures);
                        }
                } else if (tp->ca_state == TCP_CA_Loss) {
                        NET_INC_STATS_BH(TCPLossFailures);
                } else {
                        NET_INC_STATS_BH(TCPTimeouts);
                }
        }

        tcp_enter_loss(sk, 0);

        if (tcp_retransmit_skb(sk, skb_peek(&sk->write_queue)) > 0) {
                /* Retransmission failed because of local congestion;
                 * do not back off.
                 */
                if (!tp->retransmits)
                        tp->retransmits=1;
                tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS,
                                     min(tp->rto, TCP_RESOURCE_PROBE_INTERVAL));
                goto out;
        }

        /* Increase the timeout each time we retransmit.  Note that
         * we do not increase the rtt estimate.  rto is initialized
         * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
         * that doubling rto each time is the least we can get away with.
         * In KA9Q, Karn uses this for the first few times, and then
         * goes to quadratic.  NetBSD doubles, but only goes up to *64,
         * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
         * defined in the protocol as the maximum possible RTT.  I guess
         * we'll have to use something other than TCP to talk to the
         * University of Mars.
         *
         * PAWS allows us longer timeouts and large windows, so once
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
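        /* E.g. starting from a 200 msec RTO, the doubling at
         * out_reset_timer below yields timeouts of 0.2, 0.4, 0.8, ...
         * seconds until the TCP_RTO_MAX clamp (120 sec) is reached. */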
        tp->backoff++;
        tp->retransmits++;

out_reset_timer:
        tp->rto = min(tp->rto << 1, TCP_RTO_MAX);
        tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
        if (tp->retransmits > sysctl_tcp_retries1)
                __sk_dst_reset(sk);

out:;
#endif
}

static void tcp_write_timer(unsigned long data)
{
#if 0
        struct sock *sk = (struct sock*)data;
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
        int event;

        bh_lock_sock(sk);
        if (sk->lock.users) {
                /* Try again later */
                if (!mod_timer(&tp->retransmit_timer, jiffies + (HZ/20)))
                        sock_hold(sk);
                goto out_unlock;
        }

        if (sk->state == TCP_CLOSE || !tp->pending)
                goto out;

        if ((long)(tp->timeout - jiffies) > 0) {
                if (!mod_timer(&tp->retransmit_timer, tp->timeout))
                        sock_hold(sk);
                goto out;
        }

        event = tp->pending;
        tp->pending = 0;

        switch (event) {
        case TCP_TIME_RETRANS:
                tcp_retransmit_timer(sk);
                break;
        case TCP_TIME_PROBE0:
                tcp_probe_timer(sk);
                break;
        }
        TCP_CHECK_TIMER(sk);

out:
        tcp_mem_reclaim(sk);
out_unlock:
        bh_unlock_sock(sk);
        sock_put(sk);
#endif
}

/*
 *      Timer for listening sockets
 */

static void tcp_synack_timer(struct sock *sk)
{
#if 0
        struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
        struct tcp_listen_opt *lopt = tp->listen_opt;
        int max_retries = tp->syn_retries ? : sysctl_tcp_synack_retries;
        int thresh = max_retries;
        unsigned long now = jiffies;
        struct open_request **reqp, *req;
        int i, budget;

        if (lopt == NULL || lopt->qlen == 0)
                return;

        /* Normally all the openreqs are young and become mature
         * (i.e. converted to an established socket) within the first
         * timeout. If a synack was not acknowledged for 3 seconds, it
         * means one of the following: the synack was lost, the ack was
         * lost, rtt is high, or nobody planned to ack (i.e. a synflood).
         * When the server is a bit loaded, the queue is populated with
         * old open requests, reducing the effective size of the queue.
         * When the server is well loaded, the queue size reduces to zero
         * after several minutes of work. That is not a synflood, it is
         * normal operation. The solution is to prune entries that are
         * too old, overriding the normal timeout, when the situation
         * becomes dangerous.
         *
         * Essentially, we reserve half of the room for young
         * embryos, and abort old ones without pity if they are about
         * to clog our table.
         */
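        /* Worked example: with max_qlen_log == 10 the check below fires
         * once qlen reaches 512 (half of the 1024-entry capacity). If
         * only 64 requests are young, thresh is decremented while
         * qlen >= 128, 256, 512, ..., so old entries are granted fewer
         * SYNACK retries the more they dominate the queue. */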
        if (lopt->qlen>>(lopt->max_qlen_log-1)) {
                int young = (lopt->qlen_young<<1);

                while (thresh > 2) {
                        if (lopt->qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }

        if (tp->defer_accept)
                max_retries = tp->defer_accept;

        budget = 2*(TCP_SYNQ_HSIZE/(TCP_TIMEOUT_INIT/TCP_SYNQ_INTERVAL));
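        /* Assuming the usual 2.4.x values (TCP_SYNQ_HSIZE 512,
         * TCP_TIMEOUT_INIT 3*HZ, TCP_SYNQ_INTERVAL HZ/5) this works out
         * to 2*(512/15) = 68 buckets per run, so the whole hash table is
         * swept roughly twice per initial SYNACK timeout. */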
        i = lopt->clock_hand;

        do {
                reqp=&lopt->syn_table[i];
                while ((req = *reqp) != NULL) {
                        if ((long)(now - req->expires) >= 0) {
                                if ((req->retrans < thresh ||
                                     (req->acked && req->retrans < max_retries))
                                    && !req->class->rtx_syn_ack(sk, req, NULL)) {
                                        unsigned long timeo;

                                        if (req->retrans++ == 0)
                                                lopt->qlen_young--;
                                        timeo = min((TCP_TIMEOUT_INIT << req->retrans),
                                                    TCP_RTO_MAX);
                                        req->expires = now + timeo;
                                        reqp = &req->dl_next;
                                        continue;
                                }

                                /* Drop this request */
                                write_lock(&tp->syn_wait_lock);
                                *reqp = req->dl_next;
                                write_unlock(&tp->syn_wait_lock);
                                lopt->qlen--;
                                if (req->retrans == 0)
                                        lopt->qlen_young--;
                                tcp_openreq_free(req);
                                continue;
                        }
                        reqp = &req->dl_next;
                }

                i = (i+1)&(TCP_SYNQ_HSIZE-1);

        } while (--budget > 0);

        lopt->clock_hand = i;

        if (lopt->qlen)
                tcp_reset_keepalive_timer(sk, TCP_SYNQ_INTERVAL);
#endif
}

void tcp_delete_keepalive_timer (struct sock *sk)
{
#if 0
        if (timer_pending(&sk->timer) && del_timer (&sk->timer))
                __sock_put(sk);
#endif
}

void tcp_reset_keepalive_timer (struct sock *sk, unsigned long len)
{
#if 0
        if (!mod_timer(&sk->timer, jiffies+len))
                sock_hold(sk);
#endif
}

void tcp_set_keepalive(struct sock *sk, int val)
{
#if 0
        if ((1<<sk->state)&(TCPF_CLOSE|TCPF_LISTEN))
                return;

        if (val && !sk->keepopen)
                tcp_reset_keepalive_timer(sk, keepalive_time_when(&sk->tp_pinfo.af_tcp));
        else if (!val)
                tcp_delete_keepalive_timer(sk);
#endif
}


static void tcp_keepalive_timer (unsigned long data)
{
#if 0
        struct sock *sk = (struct sock *) data;
        struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
        __u32 elapsed;

        /* Only process if the socket is not in use. */
        bh_lock_sock(sk);
        if (sk->lock.users) {
                /* Try again later. */
                tcp_reset_keepalive_timer (sk, HZ/20);
                goto out;
        }

        if (sk->state == TCP_LISTEN) {
                tcp_synack_timer(sk);
                goto out;
        }

        if (sk->state == TCP_FIN_WAIT2 && sk->dead) {
                if (tp->linger2 >= 0) {
                        int tmo = tcp_fin_time(tp) - TCP_TIMEWAIT_LEN;

                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
                tcp_send_active_reset(sk, GFP_ATOMIC);
                goto death;
        }

        if (!sk->keepopen || sk->state == TCP_CLOSE)
                goto out;

        elapsed = keepalive_time_when(tp);

        /* It is alive without keepalive 8) */
        if (tp->packets_out || tp->send_head)
                goto resched;

        elapsed = tcp_time_stamp - tp->rcv_tstamp;

        if (elapsed >= keepalive_time_when(tp)) {
                if ((!tp->keepalive_probes && tp->probes_out >= sysctl_tcp_keepalive_probes) ||
                     (tp->keepalive_probes && tp->probes_out >= tp->keepalive_probes)) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk) <= 0) {
                        tp->probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If the keepalive was lost due to local congestion,
                         * try harder.
                         */
                        elapsed = TCP_RESOURCE_PROBE_INTERVAL;
                }
        } else {
                /* It expires at tp->rcv_tstamp + keepalive_time_when(tp) */
                elapsed = keepalive_time_when(tp) - elapsed;
        }
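        /* Example: with a keepalive time of 7200 sec and the last segment
         * received 3600 sec ago, the branch above re-arms the timer for
         * the remaining 3600 sec instead of probing early. */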

        TCP_CHECK_TIMER(sk);
        tcp_mem_reclaim(sk);

resched:
        tcp_reset_keepalive_timer (sk, elapsed);
        goto out;

death:
        tcp_done(sk);

out:
        bh_unlock_sock(sk);
        sock_put(sk);
#endif
}