mptcp: add subflow_v(4,6)_send_synack()

The send_synack() callback needs to be overridden for MPTCP to support
TFO for two reasons:

- There may not be enough space in the TCP options if the TFO cookie has
  to be added to the SYN+ACK together with the other options: MSS (4),
  SACK OK (2), Timestamps (10), Window Scale (3+1), TFO (10+2),
  MP_CAPABLE (12). That adds up to 44 bytes, more than the 40 bytes of
  TCP option space; dropping the 10-byte Timestamps option brings it
  down to 34 bytes. The MPTCPv1 specs -- RFC 8684, section B.1 [1] --
  suggest dropping the TCP timestamps option in this case; see the size
  check sketched below the reference.

- The data received in the SYN has to be handled: the SKB can be
  dequeued from the subflow sk and transferred to the MPTCP sk. The
  counters need to be updated accordingly, and the application can then
  be notified, because new bytes have been received.

[1] https://www.rfc-editor.org/rfc/rfc8684.html#section-b.1
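
A quick size check of the SYN+ACK options listed in the first point
above (a standalone sketch, not kernel code; 40 bytes is the maximum
TCP option space allowed by the header's data offset field):

/* Standalone sketch: add up the SYN+ACK option sizes listed above and
 * compare against the 40 bytes of TCP option space.
 */
#include <stdio.h>

int main(void)
{
	int mss = 4, sack_ok = 2, tstamps = 10, wscale = 3 + 1;
	int tfo = 10 + 2, mp_capable = 12;
	int total = mss + sack_ok + tstamps + wscale + tfo + mp_capable;

	printf("with timestamps:    %d bytes (limit: 40)\n", total);	/* 44 */
	printf("without timestamps: %d bytes\n", total - tstamps);	/* 34 */
	return 0;
}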

Co-developed-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Co-developed-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Dmytro Shytyi <dmytro@shytyi.net>
Signed-off-by: Matthieu Baerts <matthieu.baerts@tessares.net>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Author: Dmytro Shytyi, 2022-11-25 23:29:51 +01:00; committed by Jakub Kicinski
commit 36b122baf6, parent dfc8d06030
4 changed files with 92 additions and 1 deletion

--- a/net/mptcp/fastopen.c
+++ b/net/mptcp/fastopen.c
@@ -6,6 +6,51 @@
 
 #include "protocol.h"
 
+void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+					      struct request_sock *req)
+{
+	struct sock *ssk = subflow->tcp_sock;
+	struct sock *sk = subflow->conn;
+	struct sk_buff *skb;
+	struct tcp_sock *tp;
+
+	tp = tcp_sk(ssk);
+
+	subflow->is_mptfo = 1;
+
+	skb = skb_peek(&ssk->sk_receive_queue);
+	if (WARN_ON_ONCE(!skb))
+		return;
+
+	/* dequeue the skb from sk receive queue */
+	__skb_unlink(skb, &ssk->sk_receive_queue);
+	skb_ext_reset(skb);
+	skb_orphan(skb);
+
+	/* We copy the fastopen data, but that don't belong to the mptcp sequence
+	 * space, need to offset it in the subflow sequence, see mptcp_subflow_get_map_offset()
+	 */
+	tp->copied_seq += skb->len;
+	subflow->ssn_offset += skb->len;
+
+	/* initialize a dummy sequence number, we will update it at MPC
+	 * completion, if needed
+	 */
+	MPTCP_SKB_CB(skb)->map_seq = -skb->len;
+	MPTCP_SKB_CB(skb)->end_seq = 0;
+	MPTCP_SKB_CB(skb)->offset = 0;
+	MPTCP_SKB_CB(skb)->has_rxtstamp = TCP_SKB_CB(skb)->has_rxtstamp;
+
+	mptcp_data_lock(sk);
+
+	mptcp_set_owner_r(skb, sk);
+	__skb_queue_tail(&sk->sk_receive_queue, skb);
+
+	sk->sk_data_ready(sk);
+
+	mptcp_data_unlock(sk);
+}
+
 void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
 				   const struct mptcp_options_received *mp_opt)
 {
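
About the dummy sequence number above: when the skb is moved to the
MPTCP socket, the peer's key -- and therefore the initial MPTCP data
sequence -- is not known yet, so map_seq temporarily holds -skb->len.
Below is a standalone sketch (not kernel code) of the rebase
arithmetic, assuming the later fixup in mptcp_fastopen_gen_msk_ackseq()
simply adds the initial ack_seq once the MP_CAPABLE handshake
completes:

/* Standalone sketch of the dummy map_seq rebase (not kernel code). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t skb_len = 100;				/* bytes carried in the SYN */
	uint64_t map_seq = -skb_len;			/* dummy value set at SYN time */
	uint64_t ack_seq = 0x1122334455667788ULL;	/* known once MPC completes */
	uint64_t end_seq;

	map_seq += ack_seq;				/* rebase into the real sequence space */
	end_seq = map_seq + skb_len;			/* == ack_seq: data sits right below it */

	printf("map_seq=%llx end_seq=%llx\n",
	       (unsigned long long)map_seq, (unsigned long long)end_seq);
	return end_seq == ack_seq ? 0 : 1;
}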

--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -191,7 +191,7 @@ static void mptcp_rfree(struct sk_buff *skb)
 	mptcp_rmem_uncharge(sk, len);
 }
 
-static void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
+void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
 {
 	skb_orphan(skb);
 	skb->sk = sk;

--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -633,6 +633,7 @@ void mptcp_sock_graft(struct sock *sk, struct socket *parent);
 struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk);
 bool __mptcp_close(struct sock *sk, long timeout);
 void mptcp_cancel_work(struct sock *sk);
+void mptcp_set_owner_r(struct sk_buff *skb, struct sock *sk);
 
 bool mptcp_addresses_equal(const struct mptcp_addr_info *a,
 			   const struct mptcp_addr_info *b, bool use_port);
@@ -842,6 +843,8 @@ bool mptcp_userspace_pm_active(const struct mptcp_sock *msk);
 void mptcp_fastopen_gen_msk_ackseq(struct mptcp_sock *msk, struct mptcp_subflow_context *subflow,
 				   const struct mptcp_options_received *mp_opt);
+void mptcp_fastopen_subflow_synack_set_params(struct mptcp_subflow_context *subflow,
+					      struct request_sock *req);
 
 static inline bool mptcp_pm_should_add_signal(struct mptcp_sock *msk)
 {

--- a/net/mptcp/subflow.c
+++ b/net/mptcp/subflow.c
@@ -307,7 +307,48 @@ static struct dst_entry *subflow_v4_route_req(const struct sock *sk,
 	return NULL;
 }
 
+static void subflow_prep_synack(const struct sock *sk, struct request_sock *req,
+				struct tcp_fastopen_cookie *foc,
+				enum tcp_synack_type synack_type)
+{
+	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk);
+	struct inet_request_sock *ireq = inet_rsk(req);
+
+	/* clear tstamp_ok, as needed depending on cookie */
+	if (foc && foc->len > -1)
+		ireq->tstamp_ok = 0;
+
+	if (synack_type == TCP_SYNACK_FASTOPEN)
+		mptcp_fastopen_subflow_synack_set_params(subflow, req);
+}
+
+static int subflow_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
+				  struct flowi *fl,
+				  struct request_sock *req,
+				  struct tcp_fastopen_cookie *foc,
+				  enum tcp_synack_type synack_type,
+				  struct sk_buff *syn_skb)
+{
+	subflow_prep_synack(sk, req, foc, synack_type);
+
+	return tcp_request_sock_ipv4_ops.send_synack(sk, dst, fl, req, foc,
+						     synack_type, syn_skb);
+}
+
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
+static int subflow_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
+				  struct flowi *fl,
+				  struct request_sock *req,
+				  struct tcp_fastopen_cookie *foc,
+				  enum tcp_synack_type synack_type,
+				  struct sk_buff *syn_skb)
+{
+	subflow_prep_synack(sk, req, foc, synack_type);
+
+	return tcp_request_sock_ipv6_ops.send_synack(sk, dst, fl, req, foc,
+						     synack_type, syn_skb);
+}
+
 static struct dst_entry *subflow_v6_route_req(const struct sock *sk,
 					      struct sk_buff *skb,
 					      struct flowi *fl,
@@ -1945,6 +1986,7 @@ void __init mptcp_subflow_init(void)
 	subflow_request_sock_ipv4_ops = tcp_request_sock_ipv4_ops;
 	subflow_request_sock_ipv4_ops.route_req = subflow_v4_route_req;
+	subflow_request_sock_ipv4_ops.send_synack = subflow_v4_send_synack;
 
 	subflow_specific = ipv4_specific;
 	subflow_specific.conn_request = subflow_v4_conn_request;
@@ -1958,6 +2000,7 @@ void __init mptcp_subflow_init(void)
 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
 	subflow_request_sock_ipv6_ops = tcp_request_sock_ipv6_ops;
 	subflow_request_sock_ipv6_ops.route_req = subflow_v6_route_req;
+	subflow_request_sock_ipv6_ops.send_synack = subflow_v6_send_synack;
 
 	subflow_v6_specific = ipv6_specific;
 	subflow_v6_specific.conn_request = subflow_v6_conn_request;
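
As a side note, the registration in mptcp_subflow_init() above uses the
common "copy the template ops, override one callback, delegate back to
the original" pattern. A minimal standalone sketch of that pattern,
with invented names (not kernel code):

/* Minimal standalone sketch (invented names, not kernel code) of the
 * "copy the template ops, override one hook, delegate to the original"
 * pattern used by mptcp_subflow_init() above.
 */
#include <stdio.h>

struct req_ops {
	int (*send_synack)(int req_id);
};

static int tcp_send_synack(int req_id)
{
	printf("plain TCP SYN+ACK for request %d\n", req_id);
	return 0;
}

static const struct req_ops tcp_ops = { .send_synack = tcp_send_synack };
static struct req_ops subflow_ops;

static int subflow_send_synack(int req_id)
{
	/* MPTCP-specific preparation would happen here, then delegate */
	printf("MPTCP prep before SYN+ACK for request %d\n", req_id);
	return tcp_ops.send_synack(req_id);
}

int main(void)
{
	subflow_ops = tcp_ops;				/* start from the TCP template */
	subflow_ops.send_synack = subflow_send_synack;	/* override a single callback */
	return subflow_ops.send_synack(42);
}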