tcp: provide earliest departure time in skb->tstamp
Switch internal TCP skb->skb_mstamp to skb->skb_mstamp_ns,
from usec units to nsec units.

Do not clear skb->tstamp before entering IP stacks in TX,
so that qdiscs or devices can implement pacing based on the
earliest departure time instead of socket sk->sk_pacing_rate.

Packets are fed with tcp_wstamp_ns, and a following patch
will update tcp_wstamp_ns when both TCP and sch_fq switch to
the earliest departure time mechanism.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Eric Dumazet authored and davem330 committed Sep 22, 2018
1 parent 9799ccb commit d3edd06
Showing 6 changed files with 13 additions and 14 deletions.
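For context on the mechanism described in the commit message, here is a minimal sketch (not part of this patch) of how a pacing qdisc or driver could honour the earliest departure time that TCP now leaves in skb->tstamp. The helper name edt_may_send() is hypothetical, and comparing directly against ktime_get_ns() assumes the qdisc shares TCP's clock base; the real sch_fq logic is more involved.

/* Illustrative only: release a packet once the clock has reached its
 * earliest departure time; a zero tstamp means "no pacing constraint".
 */
static bool edt_may_send(const struct sk_buff *skb)
{
	u64 now = ktime_get_ns();		/* monotonic clock, in ns */
	u64 edt = ktime_to_ns(skb->tstamp);	/* EDT set by TCP, in ns */

	return !edt || now >= edt;
}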
include/linux/skbuff.h: 1 addition & 1 deletion

@@ -689,7 +689,7 @@ struct sk_buff {

union {
ktime_t tstamp;
- u64 skb_mstamp;
+ u64 skb_mstamp_ns; /* earliest departure time */
};
/*
* This is the control buffer. It is free to use for every
include/net/tcp.h: 3 additions & 3 deletions

@@ -761,13 +761,13 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
}

/* provide the departure time in us unit */
static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb)
{
- return skb->skb_mstamp;
+ return div_u64(skb->skb_mstamp_ns, NSEC_PER_USEC);
}
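
As a quick check on the new units, a worked conversion, assuming TCP_TS_HZ is 1000 (one timestamp tick per millisecond, as defined in include/net/tcp.h):

/* Example: skb->skb_mstamp_ns = 5000123456 ns
 *
 * tcp_skb_timestamp(skb)    = 5000123456 / (NSEC_PER_SEC / TCP_TS_HZ)
 *                           = 5000123456 / 1000000 = 5000    (ms ticks)
 * tcp_skb_timestamp_us(skb) = 5000123456 / NSEC_PER_USEC
 *                           = 5000123456 / 1000    = 5000123 (us)
 */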


@@ -813,7 +813,7 @@ struct tcp_skb_cb {
#define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
#define TCPCB_LOST 0x04 /* SKB is lost */
#define TCPCB_TAGBITS 0x07 /* All tag bits */
- #define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp) */
+ #define TCPCB_REPAIRED 0x10 /* SKB repaired (no skb_mstamp_ns) */
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
TCPCB_REPAIRED)
net/ipv4/syncookies.c: 1 addition & 1 deletion

@@ -88,7 +88,7 @@ u64 cookie_init_timestamp(struct request_sock *req)
ts <<= TSBITS;
ts |= options;
}
- return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
+ return (u64)ts * (NSEC_PER_SEC / TCP_TS_HZ);
}


net/ipv4/tcp.c: 1 addition & 1 deletion

@@ -1295,7 +1295,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
copy = size_goal;

/* All packets are restored as if they have
- * already been sent. skb_mstamp isn't set to
+ * already been sent. skb_mstamp_ns isn't set to
* avoid wrong rtt estimation.
*/
if (tp->repair)
net/ipv4/tcp_output.c: 6 additions & 7 deletions

@@ -1014,7 +1014,7 @@ static void tcp_internal_pacing(struct sock *sk, const struct sk_buff *skb)

static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
{
- skb->skb_mstamp = tp->tcp_mstamp;
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue);
}

@@ -1061,7 +1061,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
if (unlikely(!skb))
return -ENOBUFS;
}
- skb->skb_mstamp = tp->tcp_mstamp;
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;

inet = inet_sk(sk);
tcb = TCP_SKB_CB(skb);
@@ -1165,8 +1165,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
skb_shinfo(skb)->gso_size = tcp_skb_mss(skb);

- /* Our usage of tstamp should remain private */
- skb->tstamp = 0;
+ /* Leave earliest departure time in skb->tstamp (skb->skb_mstamp_ns) */

/* Cleanup our debris for IP stacks */
memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
@@ -3221,10 +3220,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
memset(&opts, 0, sizeof(opts));
#ifdef CONFIG_SYN_COOKIES
if (unlikely(req->cookie_ts))
- skb->skb_mstamp = cookie_init_timestamp(req);
+ skb->skb_mstamp_ns = cookie_init_timestamp(req);
else
#endif
- skb->skb_mstamp = tcp_clock_us();
+ skb->skb_mstamp_ns = tcp_clock_ns();

#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
@@ -3440,7 +3439,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)

err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation);

- syn->skb_mstamp = syn_data->skb_mstamp;
+ syn->skb_mstamp_ns = syn_data->skb_mstamp_ns;

/* Now full SYN+DATA was cloned and sent (or not),
* remove the SYN from the original skb (syn_data)
net/ipv4/tcp_timer.c: 1 addition & 1 deletion

@@ -360,7 +360,7 @@ static void tcp_probe_timer(struct sock *sk)
*/
start_ts = tcp_skb_timestamp(skb);
if (!start_ts)
- skb->skb_mstamp = tp->tcp_mstamp;
+ skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
else if (icsk->icsk_user_timeout &&
(s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
goto abort;
