net: fix __dst_negative_advice() race
__dst_negative_advice() does not enforce proper RCU rules when
sk->dst_cache must be cleared, leading to possible UAF.
RCU rules are that we must first clear sk->sk_dst_cache,
then call dst_release(old_dst).
Note that sk_dst_reset(sk) is implementing this protocol correctly,
while __dst_negative_advice() uses the wrong order.
Given that ip6_negative_advice() has special logic
against RTF_CACHE, this means each of the three existing
->negative_advice() methods must perform the sk_dst_reset() themselves.
Note the check against NULL dst is centralized in
__dst_negative_advice(), there is no need to duplicate
it in various callbacks.
Many thanks to Clement Lecigne for tracking this issue.
This old bug became visible after the blamed commit, using UDP sockets.
Fixes: a87cb3e48e ("net: Facility to report route quality of connected sockets")
Reported-by: Clement Lecigne <clecigne@google.com>
Diagnosed-by: Clement Lecigne <clecigne@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <tom@herbertland.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20240528114353.1794151-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Commit: 92f1655aa2 (parent: 068648aab7)
@@ -24,7 +24,7 @@ struct dst_ops {
 	void			(*destroy)(struct dst_entry *);
 	void			(*ifdown)(struct dst_entry *,
					  struct net_device *dev);
-	struct dst_entry *	(*negative_advice)(struct dst_entry *);
+	void			(*negative_advice)(struct sock *sk, struct dst_entry *);
 	void			(*link_failure)(struct sk_buff *);
 	void			(*update_pmtu)(struct dst_entry *dst, struct sock *sk,
 					       struct sk_buff *skb, u32 mtu,
@@ -2063,17 +2063,10 @@ sk_dst_get(const struct sock *sk)

 static inline void __dst_negative_advice(struct sock *sk)
 {
-	struct dst_entry *ndst, *dst = __sk_dst_get(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);

-	if (dst && dst->ops->negative_advice) {
-		ndst = dst->ops->negative_advice(dst);
-
-		if (ndst != dst) {
-			rcu_assign_pointer(sk->sk_dst_cache, ndst);
-			sk_tx_queue_clear(sk);
-			WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
-		}
-	}
+	if (dst && dst->ops->negative_advice)
+		dst->ops->negative_advice(sk, dst);
 }

 static inline void dst_negative_advice(struct sock *sk)
@@ -129,7 +129,8 @@ struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
 INDIRECT_CALLABLE_SCOPE
 unsigned int		 ipv4_mtu(const struct dst_entry *dst);
-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
+static void		 ipv4_negative_advice(struct sock *sk,
+					      struct dst_entry *dst);
 static void		 ipv4_link_failure(struct sk_buff *skb);
 static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
 					   struct sk_buff *skb, u32 mtu,
@@ -825,22 +826,15 @@ static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buf
 	__ip_do_redirect(rt, skb, &fl4, true);
 }

-static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
+static void ipv4_negative_advice(struct sock *sk,
+				 struct dst_entry *dst)
 {
 	struct rtable *rt = dst_rtable(dst);
-	struct dst_entry *ret = dst;

-	if (rt) {
-		if (dst->obsolete > 0) {
-			ip_rt_put(rt);
-			ret = NULL;
-		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
-			   rt->dst.expires) {
-			ip_rt_put(rt);
-			ret = NULL;
-		}
-	}
-	return ret;
+	if ((dst->obsolete > 0) ||
+	    (rt->rt_flags & RTCF_REDIRECTED) ||
+	    rt->dst.expires)
+		sk_dst_reset(sk);
 }

 /*
@@ -87,7 +87,8 @@ struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
 INDIRECT_CALLABLE_SCOPE
 unsigned int		 ip6_mtu(const struct dst_entry *dst);
-static struct dst_entry *ip6_negative_advice(struct dst_entry *);
+static void		 ip6_negative_advice(struct sock *sk,
+					     struct dst_entry *dst);
 static void		 ip6_dst_destroy(struct dst_entry *);
 static void		 ip6_dst_ifdown(struct dst_entry *,
 					struct net_device *dev);
@@ -2770,24 +2771,24 @@ INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
 }
 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);

-static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
+static void ip6_negative_advice(struct sock *sk,
+				struct dst_entry *dst)
 {
 	struct rt6_info *rt = dst_rt6_info(dst);

-	if (rt) {
-		if (rt->rt6i_flags & RTF_CACHE) {
-			rcu_read_lock();
-			if (rt6_check_expired(rt)) {
-				rt6_remove_exception_rt(rt);
-				dst = NULL;
-			}
-			rcu_read_unlock();
-		} else {
-			dst_release(dst);
-			dst = NULL;
+	if (rt->rt6i_flags & RTF_CACHE) {
+		rcu_read_lock();
+		if (rt6_check_expired(rt)) {
+			/* counteract the dst_release() in sk_dst_reset() */
+			dst_hold(dst);
+			sk_dst_reset(sk);
+
+			rt6_remove_exception_rt(rt);
 		}
+		rcu_read_unlock();
+		return;
 	}
-	return dst;
+	sk_dst_reset(sk);
 }

 static void ip6_link_failure(struct sk_buff *skb)
@@ -3910,15 +3910,10 @@ static void xfrm_link_failure(struct sk_buff *skb)
 	/* Impossible. Such dst must be popped before reaches point of failure. */
 }

-static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
+static void xfrm_negative_advice(struct sock *sk, struct dst_entry *dst)
 {
-	if (dst) {
-		if (dst->obsolete) {
-			dst_release(dst);
-			dst = NULL;
-		}
-	}
-	return dst;
+	if (dst->obsolete)
+		sk_dst_reset(sk);
 }

 static void xfrm_init_pmtu(struct xfrm_dst **bundle, int nr)
Loading…
Reference in New Issue