Skip to content
Permalink
Browse files
net: ethernet: rmnet: Add support for MAPv5 uplink packets
Add support for the MAPv5 uplink packet format.
Based on the configured egress data format, request HW checksum offload
by setting the csum_valid_required bit in the MAPv5 checksum header.

Signed-off-by: Sharath Chandra Vurukala <sharathv@codeaurora.org>
  • Loading branch information
Sharath Chandra Vurukala authored and intel-lab-lkp committed Feb 11, 2021
1 parent 293142d commit 7f0a1e35c1d1c17de5873aded88d5dadfedce2fb
Show file tree
Hide file tree
Showing 5 changed files with 105 additions and 12 deletions.
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation.
/* Copyright (c) 2013-2014, 2016-2018, 2021 The Linux Foundation.
* All rights reserved.
*
* RMNET Data configuration engine
@@ -57,6 +57,7 @@ struct rmnet_priv_stats {
u64 csum_fragmented_pkt;
u64 csum_skipped;
u64 csum_sw;
u64 csum_hw;
};

struct rmnet_priv {
@@ -131,26 +131,35 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
struct rmnet_port *port, u8 mux_id,
struct net_device *orig_dev)
{
int required_headroom, additional_header_len;
int required_headroom, additional_header_len, csum_type;
struct rmnet_map_header *map_header;
csum_type = 0;

additional_header_len = 0;
required_headroom = sizeof(struct rmnet_map_header);

if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
required_headroom += additional_header_len;
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
} else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
}

required_headroom += additional_header_len;

if (skb_headroom(skb) < required_headroom) {
if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
return -ENOMEM;
}

if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
rmnet_map_checksum_uplink_packet(skb, orig_dev);
if (csum_type) {
rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
csum_type);
}

map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
map_header = rmnet_map_add_map_header(skb, additional_header_len, port, 0);
if (!map_header)
return -ENOMEM;

@@ -70,7 +70,9 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev);
struct rmnet_port *port,
struct net_device *orig_dev,
int csum_type);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);

static u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
@@ -263,12 +263,68 @@ rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
}
#endif

/* Insert a MAPv5 checksum offload header in front of an uplink packet.
 *
 * A zeroed struct rmnet_map_v5_csum_header is pushed onto the skb and its
 * header_type is set to CSUM_OFFLOAD. If the stack requested a deferred
 * checksum (CHECKSUM_PARTIAL) and the transport checksum field can be
 * located, csum_valid_required is set so the HW computes the checksum and
 * the csum_hw counter is bumped. In every other case (no partial checksum
 * requested, unknown IP version, or unsupported transport protocol) the
 * packet is accounted as a software-checksum packet via csum_sw.
 *
 * No-op unless the port was configured with RMNET_FLAGS_EGRESS_MAP_CKSUMV5.
 *
 * NOTE(review): assumes skb->data points at the IP header on entry and that
 * the caller reserved headroom for the pushed header — confirm against
 * rmnet_map_egress_handler.
 */
void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
struct rmnet_port *port,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_v5_csum_header *ul_header;

/* Only emit the v5 header when the egress format asks for it. */
if (!(port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5))
return;

ul_header = (struct rmnet_map_v5_csum_header *)
skb_push(skb, sizeof(*ul_header));
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;

if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* The IP header now sits immediately after the pushed header. */
void *iph = (char *)ul_header + sizeof(*ul_header);
__sum16 *check;
void *trans;
u8 proto;

if (skb->protocol == htons(ETH_P_IP)) {
/* ihl is in 32-bit words, so the header is ihl * 4 bytes. */
u16 ip_len = ((struct iphdr *)iph)->ihl * 4;

proto = ((struct iphdr *)iph)->protocol;
trans = iph + ip_len;
}
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6)) {
/* Fixed-size base header; extension headers are not walked,
 * so nexthdr may name an extension rather than the transport
 * protocol — rmnet_map_get_csum_field() then returns NULL and
 * we fall back to a software checksum.
 */
u16 ip_len = sizeof(struct ipv6hdr);

proto = ((struct ipv6hdr *)iph)->nexthdr;
trans = iph + ip_len;
}
#endif /* CONFIG_IPV6 */
else {
priv->stats.csum_err_invalid_ip_version++;
goto sw_csum;
}

check = rmnet_map_get_csum_field(proto, trans);
if (check) {
/* HW will fill in the transport checksum; make sure the stack
 * does not also try to complete it.
 */
skb->ip_summed = CHECKSUM_NONE;
/* Ask for checksum offloading */
ul_header->csum_valid_required = 1;
priv->stats.csum_hw++;
return;
}
}

sw_csum:
/* Header stays pushed with csum_valid_required == 0: HW leaves the
 * checksum alone and whatever the stack computed (or none) is sent.
 */
priv->stats.csum_sw++;
}

/* Adds MAP header to front of skb->data
* Padding is calculated and set appropriately in MAP header. Mux ID is
* initialized to 0.
*/
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad)
int hdrlen,
struct rmnet_port *port,
int pad)
{
struct rmnet_map_header *map_header;
u32 padding, map_datalen;
@@ -279,6 +335,11 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
skb_push(skb, sizeof(struct rmnet_map_header));
memset(map_header, 0, sizeof(struct rmnet_map_header));

/* Set next_hdr bit for csum offload packets */
if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
map_header->next_hdr = 1;
}

if (pad == RMNET_MAP_NO_PAD_BYTES) {
map_header->pkt_len = htons(map_datalen);
return map_header;
@@ -395,11 +456,8 @@ int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
return 0;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
* packets that are supported for UL checksum offload.
*/
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev)
{
struct rmnet_priv *priv = netdev_priv(orig_dev);
struct rmnet_map_ul_csum_header *ul_header;
@@ -418,10 +476,12 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,

if (skb->protocol == htons(ETH_P_IP)) {
rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_IPV6)
rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
priv->stats.csum_hw++;
return;
#else
priv->stats.csum_err_invalid_ip_version++;
@@ -441,6 +501,26 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
priv->stats.csum_sw++;
}

/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
 * packets that are supported for UL checksum offload.
 *
 * Dispatches to the MAPv4 or MAPv5 uplink checksum helper according to
 * csum_type; any other value leaves the packet untouched.
 */
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
				      struct rmnet_port *port,
				      struct net_device *orig_dev,
				      int csum_type)
{
	if (csum_type == RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
		rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
	else if (csum_type == RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
		rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
}

/* Process a MAPv5 packet header */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
u16 len)
@@ -1234,6 +1234,7 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV5 (1U << 5)

enum {
IFLA_RMNET_UNSPEC,

0 comments on commit 7f0a1e3

Please sign in to comment.