Commit 8a39d3e

baileyforrest authored and davem330 committed
gve: Introduce a new model for device options
The current model uses an integer ID and a fixed size struct for the
parameters of each device option.

The new model allows the device option structs to grow in size over
time. A driver may assume that changes to device option structs will
always be appended.

New device options will also generally have a
`supported_features_mask` so that the driver knows which fields within
a particular device option are enabled.

`gve_device_option.feat_mask` is changed to `required_features_mask`,
and it is a bitmask which must match the value expected by the driver.
This gives the device the ability to break backwards compatibility with
old drivers for certain features by blocking the old drivers from
trying to use the feature.

We maintain ABI compatibility with the old model for
GVE_DEV_OPT_ID_RAW_ADDRESSING in case a driver is using a device which
does not support the new model.

This patch introduces some new terminology:

RDA - Raw DMA Addressing - Buffers associated with SKBs are directly
      DMA mapped and read/updated by the device.

QPL - Queue Page Lists - Driver uses bounce buffers which are DMA
      mapped with the device for read/write and data is copied from/to
      SKBs.

Signed-off-by: Bailey Forrest <bcf@google.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Reviewed-by: Catherine Sullivan <csully@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
1 parent 920fb45 commit 8a39d3e
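Before the diff: the compatibility rules described above boil down to two checks on every option. The sketch below is not part of the patch; it is a minimal illustration of the new model's contract, with the hypothetical struct `my_opt` standing in for any of the per-option structs added in gve_adminq.h:

/* Minimal sketch of the new model's validation contract (illustrative
 * only; `my_opt` is a hypothetical option struct).
 */
struct my_opt {
	__be32 supported_features_mask;
	/* New fields may only ever be appended after this point. */
};

static bool my_opt_usable(const struct gve_device_option *opt,
			  u32 expected_req_feat_mask)
{
	/* required_features_mask must match the driver's expectation
	 * exactly; a mismatch means the device is deliberately blocking
	 * this driver from using the feature.
	 */
	if (be32_to_cpu(opt->required_features_mask) != expected_req_feat_mask)
		return false;

	/* A newer device may send a larger struct than the driver knows
	 * about (appended fields), but never a smaller one.
	 */
	return be16_to_cpu(opt->option_length) >= sizeof(struct my_opt);
}

Fields gated by supported_features_mask are then read only when the corresponding bit is set.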

File tree

2 files changed: +179, -43 lines

drivers/net/ethernet/google/gve/gve_adminq.c

Lines changed: 134 additions & 38 deletions
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/etherdevice.h>
@@ -18,6 +18,8 @@
 	"Expected: length=%d, feature_mask=%x.\n" \
 	"Actual: length=%d, feature_mask=%x.\n"
 
+#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
 					      struct gve_device_option *option)
@@ -33,28 +35,81 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
 static
 void gve_parse_device_option(struct gve_priv *priv,
 			     struct gve_device_descriptor *device_descriptor,
-			     struct gve_device_option *option)
+			     struct gve_device_option *option,
+			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+			     struct gve_device_option_dqo_rda **dev_op_dqo_rda)
 {
+	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
 	u16 option_length = be16_to_cpu(option->option_length);
 	u16 option_id = be16_to_cpu(option->option_id);
 
+	/* If the length or feature mask doesn't match, continue without
+	 * enabling the feature.
+	 */
 	switch (option_id) {
-	case GVE_DEV_OPT_ID_RAW_ADDRESSING:
-		/* If the length or feature mask doesn't match,
-		 * continue without enabling the feature.
-		 */
-		if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
-		    option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
-			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
-				 GVE_DEV_OPT_LEN_RAW_ADDRESSING,
-				 cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
-				 option_length, option->feat_mask);
-			priv->raw_addressing = 0;
-		} else {
-			dev_info(&priv->pdev->dev,
-				 "Raw addressing device option enabled.\n");
-			priv->raw_addressing = 1;
+	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
+		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "Raw Addressing",
+				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
+				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		dev_info(&priv->pdev->dev,
+			 "Gqi raw addressing device option enabled.\n");
+		priv->raw_addressing = 1;
+		break;
+	case GVE_DEV_OPT_ID_GQI_RDA:
+		if (option_length < sizeof(**dev_op_gqi_rda) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_gqi_rda)) {
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
+		}
+		*dev_op_gqi_rda = (void *)(option + 1);
+		break;
+	case GVE_DEV_OPT_ID_GQI_QPL:
+		if (option_length < sizeof(**dev_op_gqi_qpl) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_gqi_qpl)) {
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
+		}
+		*dev_op_gqi_qpl = (void *)(option + 1);
+		break;
+	case GVE_DEV_OPT_ID_DQO_RDA:
+		if (option_length < sizeof(**dev_op_dqo_rda) ||
+		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+				 option_length, req_feat_mask);
+			break;
+		}
+
+		if (option_length > sizeof(**dev_op_dqo_rda)) {
+			dev_warn(&priv->pdev->dev,
+				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
 		}
+		*dev_op_dqo_rda = (void *)(option + 1);
 		break;
 	default:
 		/* If we don't recognize the option just continue
@@ -65,6 +120,39 @@ void gve_parse_device_option(struct gve_priv *priv,
 	}
 }
 
+/* Process all device options for a given describe device call. */
+static int
+gve_process_device_options(struct gve_priv *priv,
+			   struct gve_device_descriptor *descriptor,
+			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+			   struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+{
+	const int num_options = be16_to_cpu(descriptor->num_device_options);
+	struct gve_device_option *dev_opt;
+	int i;
+
+	/* The options struct directly follows the device descriptor. */
+	dev_opt = (void *)(descriptor + 1);
+	for (i = 0; i < num_options; i++) {
+		struct gve_device_option *next_opt;
+
+		next_opt = gve_get_next_option(descriptor, dev_opt);
+		if (!next_opt) {
+			dev_err(&priv->dev->dev,
+				"options exceed device_descriptor's total length.\n");
+			return -EINVAL;
+		}
+
+		gve_parse_device_option(priv, descriptor, dev_opt,
+					dev_op_gqi_rda, dev_op_gqi_qpl,
+					dev_op_dqo_rda);
+		dev_opt = next_opt;
+	}
+
+	return 0;
+}
+
 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
 {
 	priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
@@ -514,15 +602,15 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
+	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
 	struct gve_device_descriptor *descriptor;
-	struct gve_device_option *dev_opt;
 	union gve_adminq_command cmd;
 	dma_addr_t descriptor_bus;
-	u16 num_options;
 	int err = 0;
 	u8 *mac;
 	u16 mtu;
-	int i;
 
 	memset(&cmd, 0, sizeof(cmd));
 	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -540,6 +628,31 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 	if (err)
 		goto free_device_descriptor;
 
+	priv->raw_addressing = 0;
+	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
+					 &dev_op_gqi_qpl, &dev_op_dqo_rda);
+	if (err)
+		goto free_device_descriptor;
+
+	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
+	 * is not set to GqiRda, choose the queue format in a priority order:
+	 * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
+	 */
+	if (priv->raw_addressing == 1) {
+		dev_info(&priv->pdev->dev,
+			 "Driver is running with GQI RDA queue format.\n");
+	} else if (dev_op_dqo_rda) {
+		dev_info(&priv->pdev->dev,
+			 "Driver is running with DQO RDA queue format.\n");
+	} else if (dev_op_gqi_rda) {
+		dev_info(&priv->pdev->dev,
+			 "Driver is running with GQI RDA queue format.\n");
+		priv->raw_addressing = 1;
+	} else {
+		dev_info(&priv->pdev->dev,
+			 "Driver is running with GQI QPL queue format.\n");
+	}
+
 	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
 	if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
 		dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
@@ -576,26 +689,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
 		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
 	}
 	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
-	dev_opt = (void *)(descriptor + 1);
-
-	num_options = be16_to_cpu(descriptor->num_device_options);
-	for (i = 0; i < num_options; i++) {
-		struct gve_device_option *next_opt;
-
-		next_opt = gve_get_next_option(descriptor, dev_opt);
-		if (!next_opt) {
-			dev_err(&priv->dev->dev,
-				"options exceed device_descriptor's total length.\n");
-			err = -EINVAL;
-			goto free_device_descriptor;
-		}
-
-		gve_parse_device_option(priv, descriptor, dev_opt);
-		dev_opt = next_opt;
-	}
 
 free_device_descriptor:
-	dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
+	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
 			  descriptor_bus);
 	return err;
 }
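For context, gve_process_device_options above leans on gve_get_next_option, which this patch does not modify and whose body is therefore not in this diff. Inferred from the surrounding usage, its bounds check plausibly has the following shape: the end of the current option is computed from option_length, and NULL is returned once it would run past the descriptor's total_length:

/* Sketch of the pre-existing bounds check (inferred, not part of this
 * diff): a NULL return tells the caller the options overran the
 * descriptor.
 */
static struct gve_device_option *
gve_get_next_option(struct gve_device_descriptor *descriptor,
		    struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : option_end;
}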

drivers/net/ethernet/google/gve/gve_adminq.h

Lines changed: 45 additions & 5 deletions
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
  * Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #ifndef _GVE_ADMINQ_H
@@ -82,14 +82,54 @@ static_assert(sizeof(struct gve_device_descriptor) == 40);
 struct gve_device_option {
 	__be16 option_id;
 	__be16 option_length;
-	__be32 feat_mask;
+	__be32 required_features_mask;
 };
 
 static_assert(sizeof(struct gve_device_option) == 8);
 
-#define GVE_DEV_OPT_ID_RAW_ADDRESSING 0x1
-#define GVE_DEV_OPT_LEN_RAW_ADDRESSING 0x0
-#define GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING 0x0
+struct gve_device_option_gqi_rda {
+	__be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_rda) == 4);
+
+struct gve_device_option_gqi_qpl {
+	__be32 supported_features_mask;
+};
+
+static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
+
+struct gve_device_option_dqo_rda {
+	__be32 supported_features_mask;
+	__be16 tx_comp_ring_entries;
+	__be16 rx_buff_ring_entries;
+};
+
+static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+
+/* Terminology:
+ *
+ * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
+ *       mapped and read/updated by the device.
+ *
+ * QPL - Queue Page Lists - Driver uses bounce buffers which are DMA mapped with
+ *       the device for read/write and data is copied from/to SKBs.
+ */
+enum gve_dev_opt_id {
+	GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+	GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+	GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+	GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+};
+
+enum gve_dev_opt_req_feat_mask {
+	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+	GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+};
+
+#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
 
 struct gve_adminq_configure_device_resources {
 	__be64 counter_array;
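One usage note on the growth model these structs encode: a later device revision appends fields and advertises them via supported_features_mask, so older drivers keep working against newer devices. The sketch below is hypothetical — the v2 struct, its new field, and the feature bit are invented for illustration:

/* Hypothetical future revision: the new field is appended, never
 * inserted, so an older driver that only checks
 * option_length >= sizeof(struct gve_device_option_dqo_rda) still
 * parses the prefix it knows about.
 */
struct gve_device_option_dqo_rda_v2 {
	__be32 supported_features_mask;
	__be16 tx_comp_ring_entries;
	__be16 rx_buff_ring_entries;
	__be32 example_new_field;	/* invented for illustration */
};

#define GVE_SUP_EXAMPLE_FEAT_MASK 0x1	/* hypothetical feature bit */

/* A newer driver reads the appended field only when both the length and
 * the advertised feature bit confirm it is present:
 *
 *	if (option_length >= sizeof(struct gve_device_option_dqo_rda_v2) &&
 *	    (be32_to_cpu(opt->supported_features_mask) &
 *	     GVE_SUP_EXAMPLE_FEAT_MASK))
 *		new_val = be32_to_cpu(opt->example_new_field);
 */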
