rstreason: prepare for active reset
As with the earlier passive-reset change, this only passes the possible reset reason down each active reset path. No functional changes.

Signed-off-by: Jason Xing <kernelxing@tencent.com>
Acked-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
commit 5691276b39
parent 6be49deaa0
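For context: the reason values used below come from enum sk_rst_reason in <net/rstreason.h>, introduced by the earlier passive-reset patches. A minimal sketch of the shape this patch relies on (not the verbatim header; the real enum carries many more values, and only SK_RST_REASON_NOT_SPECIFIED appears in this patch):

/* Sketch only: the full enum in <net/rstreason.h> defines one value
 * per RST cause; this patch always passes SK_RST_REASON_NOT_SPECIFIED,
 * keeping behavior unchanged.
 */
enum sk_rst_reason {
        SK_RST_REASON_NOT_SPECIFIED,    /* no specific cause recorded */
        SK_RST_REASON_MAX,
};

/* New signature: every active reset path now states its reason. */
void tcp_send_active_reset(struct sock *sk, gfp_t priority,
                           enum sk_rst_reason reason);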
diff --git a/include/net/tcp.h b/include/net/tcp.h
@@ -670,7 +670,8 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
 void tcp_send_probe0(struct sock *);
 int tcp_write_wakeup(struct sock *, int mib);
 void tcp_send_fin(struct sock *sk);
-void tcp_send_active_reset(struct sock *sk, gfp_t priority);
+void tcp_send_active_reset(struct sock *sk, gfp_t priority,
+                           enum sk_rst_reason reason);
 int tcp_send_synack(struct sock *);
 void tcp_push_one(struct sock *, unsigned int mss_now);
 void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
@@ -275,6 +275,7 @@
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/sock.h>
+#include <net/rstreason.h>
 
 #include <linux/uaccess.h>
 #include <asm/ioctls.h>
@@ -2811,7 +2812,8 @@ void __tcp_close(struct sock *sk, long timeout)
                /* Unread data was tossed, zap the connection. */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
-               tcp_send_active_reset(sk, sk->sk_allocation);
+               tcp_send_active_reset(sk, sk->sk_allocation,
+                                     SK_RST_REASON_NOT_SPECIFIED);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
@@ -2885,7 +2887,8 @@ adjudge_to_death:
                struct tcp_sock *tp = tcp_sk(sk);
                if (READ_ONCE(tp->linger2) < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
+                       tcp_send_active_reset(sk, GFP_ATOMIC,
+                                             SK_RST_REASON_NOT_SPECIFIED);
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPABORTONLINGER);
                } else {
@@ -2903,7 +2906,8 @@ adjudge_to_death:
        if (sk->sk_state != TCP_CLOSE) {
                if (tcp_check_oom(sk, 0)) {
                        tcp_set_state(sk, TCP_CLOSE);
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
+                       tcp_send_active_reset(sk, GFP_ATOMIC,
+                                             SK_RST_REASON_NOT_SPECIFIED);
                        __NET_INC_STATS(sock_net(sk),
                                        LINUX_MIB_TCPABORTONMEMORY);
                } else if (!check_net(sock_net(sk))) {
@@ -3007,7 +3011,7 @@ int tcp_disconnect(struct sock *sk, int flags)
                /* The last check adjusts for discrepancy of Linux wrt. RFC
                 * states
                 */
-               tcp_send_active_reset(sk, gfp_any());
+               tcp_send_active_reset(sk, gfp_any(), SK_RST_REASON_NOT_SPECIFIED);
                WRITE_ONCE(sk->sk_err, ECONNRESET);
        } else if (old_state == TCP_SYN_SENT)
                WRITE_ONCE(sk->sk_err, ECONNRESET);
@@ -4564,7 +4568,8 @@ int tcp_abort(struct sock *sk, int err)
                smp_wmb();
                sk_error_report(sk);
                if (tcp_need_reset(sk->sk_state))
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
+                       tcp_send_active_reset(sk, GFP_ATOMIC,
+                                             SK_RST_REASON_NOT_SPECIFIED);
                tcp_done(sk);
        }
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
@@ -3615,7 +3615,8 @@ void tcp_send_fin(struct sock *sk)
  * was unread data in the receive queue. This behavior is recommended
  * by RFC 2525, section 2.17. -DaveM
  */
-void tcp_send_active_reset(struct sock *sk, gfp_t priority)
+void tcp_send_active_reset(struct sock *sk, gfp_t priority,
+                           enum sk_rst_reason reason)
 {
        struct sk_buff *skb;
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/gfp.h>
 #include <net/tcp.h>
+#include <net/rstreason.h>
 
 static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
 {
@@ -127,7 +128,8 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
                if (do_reset)
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
+                       tcp_send_active_reset(sk, GFP_ATOMIC,
+                                             SK_RST_REASON_NOT_SPECIFIED);
                tcp_done(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
@@ -768,7 +770,7 @@ static void tcp_keepalive_timer (struct timer_list *t)
                                goto out;
                        }
                }
-               tcp_send_active_reset(sk, GFP_ATOMIC);
+               tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_NOT_SPECIFIED);
                goto death;
        }
 
@@ -795,7 +797,8 @@ static void tcp_keepalive_timer (struct timer_list *t)
                     icsk->icsk_probes_out > 0) ||
                    (user_timeout == 0 &&
                     icsk->icsk_probes_out >= keepalive_probes(tp))) {
-                       tcp_send_active_reset(sk, GFP_ATOMIC);
+                       tcp_send_active_reset(sk, GFP_ATOMIC,
+                                             SK_RST_REASON_NOT_SPECIFIED);
                        tcp_write_err(sk);
                        goto out;
                }
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
@@ -21,6 +21,7 @@
 #endif
 #include <net/mptcp.h>
 #include <net/xfrm.h>
+#include <net/rstreason.h>
 #include <asm/ioctls.h>
 #include "protocol.h"
 #include "mib.h"
@@ -2569,7 +2570,8 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 
                slow = lock_sock_fast(tcp_sk);
                if (tcp_sk->sk_state != TCP_CLOSE) {
-                       tcp_send_active_reset(tcp_sk, GFP_ATOMIC);
+                       tcp_send_active_reset(tcp_sk, GFP_ATOMIC,
+                                             SK_RST_REASON_NOT_SPECIFIED);
                        tcp_set_state(tcp_sk, TCP_CLOSE);
                }
                unlock_sock_fast(tcp_sk, slow);
diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
@@ -414,7 +414,7 @@ void mptcp_subflow_reset(struct sock *ssk)
        /* must hold: tcp_done() could drop last reference on parent */
        sock_hold(sk);
 
-       tcp_send_active_reset(ssk, GFP_ATOMIC);
+       tcp_send_active_reset(ssk, GFP_ATOMIC, SK_RST_REASON_NOT_SPECIFIED);
        tcp_done(ssk);
        if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
                mptcp_schedule_work(sk);
@@ -1350,7 +1350,8 @@ reset:
                tcp_set_state(ssk, TCP_CLOSE);
                while ((skb = skb_peek(&ssk->sk_receive_queue)))
                        sk_eat_skb(ssk, skb);
-               tcp_send_active_reset(ssk, GFP_ATOMIC);
+               tcp_send_active_reset(ssk, GFP_ATOMIC,
+                                     SK_RST_REASON_NOT_SPECIFIED);
                WRITE_ONCE(subflow->data_avail, false);
                return false;
        }
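Once concrete reason values exist, each call site can replace the placeholder. A hypothetical follow-up at the __tcp_close() abort-on-close site (SK_RST_REASON_TCP_ABORT_ON_CLOSE is an assumed name for illustration; this series defines no such value at this point):

        /* Hypothetical: name the real cause instead of the placeholder.
         * The enum value below is assumed for illustration only.
         */
        tcp_set_state(sk, TCP_CLOSE);
        tcp_send_active_reset(sk, sk->sk_allocation,
                              SK_RST_REASON_TCP_ABORT_ON_CLOSE);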