Skip to content
Permalink
Browse files
net:ethernet:rmnet:Support for downlink MAPv5 csum offload
Adding support for processing of MAPv5 downlink packets.
It involves parsing the MAPv5 packet and checking the csum
header to know whether the hardware has validated the checksum.

Based on the checksum-valid bit, the corresponding stats are
incremented and skb->ip_summed is marked either CHECKSUM_UNNECESSARY
or left as CHECKSUM_NONE to let the network stack revalidate the
checksum and update the respective SNMP stats.

Signed-off-by: Sharath Chandra Vurukala <sharathv@codeaurora.org>
  • Loading branch information
Sharath Chandra Vurukala authored and intel-lab-lkp committed Feb 11, 2021
1 parent 1dfe306 commit 293142d706c02bf2e6ce7acb4e04ebb6cf4a2a63
Show file tree
Hide file tree
Showing 6 changed files with 102 additions and 14 deletions.
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation.
* All rights reserved.
*
* RMNET Data configuration engine
*/
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data ingress/egress handler
*/
@@ -57,8 +57,8 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port)
{
struct rmnet_endpoint *ep;
u8 mux_id, next_hdr;
u16 len, pad;
u8 mux_id;

if (RMNET_MAP_GET_CD_BIT(skb)) {
if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
@@ -70,6 +70,7 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,
mux_id = RMNET_MAP_GET_MUX_ID(skb);
pad = RMNET_MAP_GET_PAD(skb);
len = RMNET_MAP_GET_LENGTH(skb) - pad;
next_hdr = RMNET_MAP_GET_NH_BIT(skb);

if (mux_id >= RMNET_MAX_LOGICAL_EP)
goto free_skb;
@@ -80,15 +81,19 @@ __rmnet_map_ingress_handler(struct sk_buff *skb,

skb->dev = ep->egress_dev;

/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
rmnet_set_skb_proto(skb);

if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
if (next_hdr &&
(port->data_format & (RMNET_FLAGS_INGRESS_MAP_CKSUMV5))) {
if (rmnet_map_process_next_hdr_packet(skb, len))
goto free_skb;
} else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}

/* Subtract MAP header */
skb_pull(skb, sizeof(struct rmnet_map_header));
rmnet_set_skb_proto(skb);

skb_trim(skb, len);
rmnet_deliver_skb(skb);
return;
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*/

#ifndef _RMNET_MAP_H_
@@ -23,6 +23,12 @@ struct rmnet_map_control_command {
};
} __aligned(1);

/* MAPv5 next-header types. A value of RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD
 * (0x2) in the v5 header's header_type field marks a checksum-offload
 * header; any other value is rejected by the ingress path.
 */
enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};

enum rmnet_map_commands {
RMNET_MAP_COMMAND_NONE,
RMNET_MAP_COMMAND_FLOW_DISABLE,
@@ -44,6 +50,9 @@ enum rmnet_map_commands {
#define RMNET_MAP_GET_LENGTH(Y) (ntohs(((struct rmnet_map_header *) \
(Y)->data)->pkt_len))

/* Read the next_hdr bit from the MAP header at the start of skb (Y)->data;
 * nonzero means a MAPv5 next header follows the MAP header.
 */
#define RMNET_MAP_GET_NH_BIT(Y) (((struct rmnet_map_header *) \
(Y)->data)->next_hdr)

#define RMNET_MAP_COMMAND_REQUEST 0
#define RMNET_MAP_COMMAND_ACK 1
#define RMNET_MAP_COMMAND_UNSUPPORTED 2
@@ -55,10 +64,29 @@ enum rmnet_map_commands {
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port);
struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
int hdrlen, int pad);
int hdrlen,
struct rmnet_port *port,
int pad);
void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
struct net_device *orig_dev);
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);

/* Return the header_type field of the MAPv5 header that immediately
 * follows the MAP header in the skb data.
 *
 * Must be "static inline": this lives in a header, and a plain "static"
 * definition would emit a separate (possibly unused) copy in every
 * translation unit that includes it. The sibling helper below already
 * uses "static inline".
 */
static inline u8 rmnet_map_get_next_hdr_type(struct sk_buff *skb)
{
	unsigned char *data = skb->data;

	data += sizeof(struct rmnet_map_header);
	return ((struct rmnet_map_v5_csum_header *)data)->header_type;
}

/* Report whether the MAPv5 checksum header following the MAP header
 * has its csum_valid_required bit set (i.e. HW validated the checksum).
 */
static inline bool rmnet_map_get_csum_valid(struct sk_buff *skb)
{
	struct rmnet_map_v5_csum_header *csum_hdr;

	csum_hdr = (struct rmnet_map_v5_csum_header *)
		   (skb->data + sizeof(struct rmnet_map_header));
	return csum_hdr->csum_valid_required;
}

#endif /* _RMNET_MAP_H_ */
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
*
* RMNET Data MAP protocol
*/
@@ -311,6 +311,7 @@ struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
struct rmnet_port *port)
{
unsigned char *data = skb->data, *next_hdr = NULL;
struct rmnet_map_header *maph;
struct sk_buff *skbn;
u32 packet_len;
@@ -323,6 +324,12 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,

if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
if (!maph->cd_bit) {
packet_len += sizeof(struct rmnet_map_v5_csum_header);
next_hdr = data + sizeof(*maph);
}
}

if (((int)skb->len - (int)packet_len) < 0)
return NULL;
@@ -331,6 +338,11 @@ struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
if (ntohs(maph->pkt_len) == 0)
return NULL;

if (next_hdr &&
((struct rmnet_map_v5_csum_header *)next_hdr)->header_type !=
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
return NULL;

skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
if (!skbn)
return NULL;
@@ -428,3 +440,33 @@ void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,

priv->stats.csum_sw++;
}

/* Process a MAPv5 packet header.
 *
 * Only the checksum-offload header type is supported; anything else is
 * rejected with -EINVAL so the caller can drop the packet. On success the
 * per-device checksum stats are updated, skb->ip_summed is set to
 * CHECKSUM_UNNECESSARY when HW reported a valid checksum (otherwise left
 * as-is so the stack revalidates it), and the v5 header is pulled.
 */
int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
				      u16 len)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	if (rmnet_map_get_next_hdr_type(skb) !=
	    RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
		return -EINVAL;

	if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
		/* RX csum offload disabled: count it, let the stack verify */
		priv->stats.csum_sw++;
	} else if (rmnet_map_get_csum_valid(skb)) {
		priv->stats.csum_ok++;
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		priv->stats.csum_valid_unset++;
	}

	/* Strip the MAPv5 checksum header */
	skb_pull(skb, sizeof(struct rmnet_map_v5_csum_header));

	return 0;
}

@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
* Copyright (c) 2013-2019, 2021 The Linux Foundation. All rights reserved.
*/

#ifndef _LINUX_IF_RMNET_H_
@@ -8,11 +8,11 @@
struct rmnet_map_header {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 pad_len:6;
u8 reserved_bit:1;
u8 next_hdr:1;
u8 cd_bit:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
u8 cd_bit:1;
u8 reserved_bit:1;
u8 next_hdr:1;
u8 pad_len:6;
#else
#error "Please fix <asm/byteorder.h>"
@@ -52,4 +52,15 @@ struct rmnet_map_ul_csum_header {
#endif
} __aligned(1);

/* MAP CSUM headers */
struct rmnet_map_v5_csum_header {
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:5;
u8 priority:1;
u8 hw_reserved_bit:1;
u8 csum_valid_required:1;
__be16 reserved;
} __aligned(1);

#endif /* !(_LINUX_IF_RMNET_H_) */
@@ -1233,6 +1233,7 @@ enum {
#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2)
#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3)
#define RMNET_FLAGS_INGRESS_MAP_CKSUMV5 (1U << 4)

enum {
IFLA_RMNET_UNSPEC,

0 comments on commit 293142d

Please sign in to comment.