diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
 /* Google virtual Ethernet (gve) driver
  *
- * Copyright (C) 2015-2019 Google, Inc.
+ * Copyright (C) 2015-2021 Google, Inc.
  */
 
 #include <linux/etherdevice.h>
@@ -18,6 +18,8 @@
 "Expected: length=%d, feature_mask=%x.\n" \
 "Actual: length=%d, feature_mask=%x.\n"
 
+#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
+
 static
 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
                                               struct gve_device_option *option)
@@ -33,28 +35,81 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
 static
 void gve_parse_device_option(struct gve_priv *priv,
                              struct gve_device_descriptor *device_descriptor,
-                             struct gve_device_option *option)
+                             struct gve_device_option *option,
+                             struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                             struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                             struct gve_device_option_dqo_rda **dev_op_dqo_rda)
 {
+        u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
         u16 option_length = be16_to_cpu(option->option_length);
         u16 option_id = be16_to_cpu(option->option_id);
 
+        /* If the length or feature mask doesn't match, continue without
+         * enabling the feature.
+         */
         switch (option_id) {
-        case GVE_DEV_OPT_ID_RAW_ADDRESSING:
-                /* If the length or feature mask doesn't match,
-                 * continue without enabling the feature.
-                 */
-                if (option_length != GVE_DEV_OPT_LEN_RAW_ADDRESSING ||
-                    option->feat_mask != cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING)) {
-                        dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, "Raw Addressing",
-                                 GVE_DEV_OPT_LEN_RAW_ADDRESSING,
-                                 cpu_to_be32(GVE_DEV_OPT_FEAT_MASK_RAW_ADDRESSING),
-                                 option_length, option->feat_mask);
-                        priv->raw_addressing = 0;
-                } else {
-                        dev_info(&priv->pdev->dev,
-                                 "Raw addressing device option enabled.\n");
-                        priv->raw_addressing = 1;
+        case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
+                if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
+                    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
+                        dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                 "Raw Addressing",
+                                 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
+                                 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
+                                 option_length, req_feat_mask);
+                        break;
+                }
+
+                dev_info(&priv->pdev->dev,
+                         "Gqi raw addressing device option enabled.\n");
+                priv->raw_addressing = 1;
+                break;
+        case GVE_DEV_OPT_ID_GQI_RDA:
+                if (option_length < sizeof(**dev_op_gqi_rda) ||
+                    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
+                        dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
+                                 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
+                                 option_length, req_feat_mask);
+                        break;
+                }
+
+                if (option_length > sizeof(**dev_op_gqi_rda)) {
+                        dev_warn(&priv->pdev->dev,
+                                 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
+                }
+                *dev_op_gqi_rda = (void *)(option + 1);
+                break;
+        case GVE_DEV_OPT_ID_GQI_QPL:
+                if (option_length < sizeof(**dev_op_gqi_qpl) ||
+                    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
+                        dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
+                                 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
+                                 option_length, req_feat_mask);
+                        break;
+                }
+
+                if (option_length > sizeof(**dev_op_gqi_qpl)) {
+                        dev_warn(&priv->pdev->dev,
+                                 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
+                }
+                *dev_op_gqi_qpl = (void *)(option + 1);
+                break;
+        case GVE_DEV_OPT_ID_DQO_RDA:
+                if (option_length < sizeof(**dev_op_dqo_rda) ||
+                    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
+                        dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+                                 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
+                                 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
+                                 option_length, req_feat_mask);
+                        break;
+                }
+
+                if (option_length > sizeof(**dev_op_dqo_rda)) {
+                        dev_warn(&priv->pdev->dev,
+                                 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
                 }
+                *dev_op_dqo_rda = (void *)(option + 1);
                 break;
         default:
                 /* If we don't recognize the option just continue
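For orientation between hunks: the parser above treats each device option as a TLV record whose payload sits immediately after a fixed header, which is why the new cases capture `(void *)(option + 1)`. A minimal sketch of that layout, with field names inferred from the accessors used in this diff (the authoritative struct definitions live in gve_adminq.h and may differ):

```c
#include <linux/types.h>

/* Sketch of the device-option wire layout the parser walks. Field names
 * are inferred from the accessors used in this diff (option_id,
 * option_length, required_features_mask); see gve_adminq.h for the
 * authoritative definitions.
 */
struct gve_device_option {
	__be16 option_id;              /* one of GVE_DEV_OPT_ID_* */
	__be16 option_length;          /* bytes of payload after this header */
	__be32 required_features_mask; /* checked against GVE_DEV_OPT_REQ_FEAT_MASK_* */
};

/* Hypothetical helper, for illustration only: the option-specific struct
 * (e.g. gve_device_option_gqi_rda) starts right after the header.
 */
static inline void *gve_option_payload(struct gve_device_option *option)
{
	return (void *)(option + 1);
}
```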
@@ -65,6 +120,39 @@ void gve_parse_device_option(struct gve_priv *priv,
         }
 }
 
+/* Process all device options for a given describe device call. */
+static int
+gve_process_device_options(struct gve_priv *priv,
+                           struct gve_device_descriptor *descriptor,
+                           struct gve_device_option_gqi_rda **dev_op_gqi_rda,
+                           struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
+                           struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+{
+        const int num_options = be16_to_cpu(descriptor->num_device_options);
+        struct gve_device_option *dev_opt;
+        int i;
+
+        /* The options struct directly follows the device descriptor. */
+        dev_opt = (void *)(descriptor + 1);
+        for (i = 0; i < num_options; i++) {
+                struct gve_device_option *next_opt;
+
+                next_opt = gve_get_next_option(descriptor, dev_opt);
+                if (!next_opt) {
+                        dev_err(&priv->dev->dev,
+                                "options exceed device_descriptor's total length.\n");
+                        return -EINVAL;
+                }
+
+                gve_parse_device_option(priv, descriptor, dev_opt,
+                                        dev_op_gqi_rda, dev_op_gqi_qpl,
+                                        dev_op_dqo_rda);
+                dev_opt = next_opt;
+        }
+
+        return 0;
+}
+
 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
 {
         priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
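gve_process_device_options() leans on gve_get_next_option(), whose body falls outside these hunks. A sketch of the bounds check such a helper needs, assuming descriptor->total_length spans the descriptor plus every option that follows it (which is what the "options exceed device_descriptor's total length" error above implies):

```c
/* Sketch of the bounds check gve_get_next_option() must perform; the real
 * body is outside this diff. Assumes descriptor->total_length covers the
 * descriptor and all trailing options.
 */
static struct gve_device_option *
gve_get_next_option(struct gve_device_descriptor *descriptor,
		    struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor +
			 be16_to_cpu(descriptor->total_length);

	/* Returning NULL tells the caller this option would overrun the
	 * descriptor, which gve_process_device_options() turns into -EINVAL.
	 */
	return option_end > descriptor_end ?
		NULL : (struct gve_device_option *)option_end;
}
```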
@@ -514,15 +602,15 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
 
 int gve_adminq_describe_device(struct gve_priv *priv)
 {
+        struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
+        struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
+        struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
         struct gve_device_descriptor *descriptor;
-        struct gve_device_option *dev_opt;
         union gve_adminq_command cmd;
         dma_addr_t descriptor_bus;
-        u16 num_options;
         int err = 0;
         u8 *mac;
         u16 mtu;
-        int i;
 
         memset(&cmd, 0, sizeof(cmd));
         descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
@@ -540,6 +628,31 @@ int gve_adminq_describe_device(struct gve_priv *priv)
         if (err)
                 goto free_device_descriptor;
 
+        priv->raw_addressing = 0;
+        err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
+                                         &dev_op_gqi_qpl, &dev_op_dqo_rda);
+        if (err)
+                goto free_device_descriptor;
+
+        /* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
+         * is not set to GqiRda, choose the queue format in a priority order:
+         * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
+         */
+        if (priv->raw_addressing == 1) {
+                dev_info(&priv->pdev->dev,
+                         "Driver is running with GQI RDA queue format.\n");
+        } else if (dev_op_dqo_rda) {
+                dev_info(&priv->pdev->dev,
+                         "Driver is running with DQO RDA queue format.\n");
+        } else if (dev_op_gqi_rda) {
+                dev_info(&priv->pdev->dev,
+                         "Driver is running with GQI RDA queue format.\n");
+                priv->raw_addressing = 1;
+        } else {
+                dev_info(&priv->pdev->dev,
+                         "Driver is running with GQI QPL queue format.\n");
+        }
+
         priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
         if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
                 dev_err(&priv->pdev->dev, "Tx desc count %d too low\n", priv->tx_desc_cnt);
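The if/else chain above encodes a fixed precedence. Restated as a standalone decision function (the enum and function names are hypothetical, purely to make the ordering explicit; at this point the driver only logs the choice and sets raw_addressing):

```c
/* Hypothetical restatement of the precedence in the hunk above: an
 * already-negotiated GQI raw-addressing option wins, then DQO RDA,
 * then GQI RDA, with GQI QPL as the fallback.
 */
enum gve_queue_format_sketch {
	GVE_GQI_RDA_FORMAT,
	GVE_DQO_RDA_FORMAT,
	GVE_GQI_QPL_FORMAT,
};

static enum gve_queue_format_sketch
gve_pick_queue_format(bool raw_addressing,
		      const void *dev_op_dqo_rda,
		      const void *dev_op_gqi_rda)
{
	if (raw_addressing)
		return GVE_GQI_RDA_FORMAT;
	if (dev_op_dqo_rda)
		return GVE_DQO_RDA_FORMAT;
	if (dev_op_gqi_rda)
		return GVE_GQI_RDA_FORMAT; /* the driver also sets raw_addressing = 1 here */
	return GVE_GQI_QPL_FORMAT;
}
```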
@@ -576,26 +689,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
                 priv->rx_desc_cnt = priv->rx_data_slot_cnt;
         }
         priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
-        dev_opt = (void *)(descriptor + 1);
-
-        num_options = be16_to_cpu(descriptor->num_device_options);
-        for (i = 0; i < num_options; i++) {
-                struct gve_device_option *next_opt;
-
-                next_opt = gve_get_next_option(descriptor, dev_opt);
-                if (!next_opt) {
-                        dev_err(&priv->dev->dev,
-                                "options exceed device_descriptor's total length.\n");
-                        err = -EINVAL;
-                        goto free_device_descriptor;
-                }
-
-                gve_parse_device_option(priv, descriptor, dev_opt);
-                dev_opt = next_opt;
-        }
 
 free_device_descriptor:
-        dma_free_coherent(&priv->pdev->dev, sizeof(*descriptor), descriptor,
+        dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
                           descriptor_bus);
         return err;
 }
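A small correctness fix rides along in the final hunk: dma_free_coherent() must be passed the same size that was given to dma_alloc_coherent(). The descriptor is read out of a PAGE_SIZE coherent buffer, so freeing sizeof(*descriptor) mismatched the allocation. The pairing in isolation (a sketch; names follow the driver, command issue and parsing elided):

```c
/* Sketch of the alloc/free pairing the final hunk corrects: the size
 * argument to dma_free_coherent() must equal the one used at allocation
 * time, PAGE_SIZE here, not sizeof(*descriptor).
 */
static int gve_describe_device_sketch(struct gve_priv *priv)
{
	struct gve_device_descriptor *descriptor;
	dma_addr_t descriptor_bus;
	int err = 0;

	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
					&descriptor_bus, GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;

	/* ... issue the describe-device admin command and parse options ... */

	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
			  descriptor_bus);
	return err;
}
```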