@@ -657,6 +657,14 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
 		if (err1)
 			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
 	}
+	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
+		if (mlx5_eswitch_inline_mode_get(esw,
+						 num_vfs,
+						 &esw->offloads.inline_mode)) {
+			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
+			esw_warn(esw->dev, "Inline mode is different between vports\n");
+		}
+	}
 	return err;
 }
 
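A note on the hunk above: when the eswitch moves to offloads mode and no inline mode has been configured yet, the driver derives one from the existing vports and falls back to MLX5_INLINE_MODE_L2 if they disagree. Below is a minimal standalone sketch of that "agree or default" scan; query_vport_min_inline() is a hypothetical stub standing in for mlx5_query_nic_vport_min_inline(), and the enum values are illustrative only.

#include <stdio.h>

enum { INLINE_NONE, INLINE_L2, INLINE_IP, INLINE_TCP_UDP };

/* Hypothetical stub standing in for mlx5_query_nic_vport_min_inline(). */
static int query_vport_min_inline(int vport)
{
	return INLINE_L2; /* pretend every VF agrees on L2 */
}

/* Return the common mode of vports 1..nvfs, or -1 if they disagree. */
static int common_inline_mode(int nvfs)
{
	int vport, mode = INLINE_L2, prev = INLINE_L2;

	for (vport = 1; vport <= nvfs; vport++) {
		mode = query_vport_min_inline(vport);
		if (vport > 1 && prev != mode)
			return -1;
		prev = mode;
	}
	return mode;
}

int main(void)
{
	int mode = common_inline_mode(4);

	if (mode < 0)
		mode = INLINE_L2; /* mirrors the L2 fallback in esw_offloads_start() */
	printf("effective inline mode: %d\n", mode);
	return 0;
}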
@@ -771,6 +779,50 @@ static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
 	return 0;
 }
 
+static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
+{
+	switch (mode) {
+	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
+		*mlx5_mode = MLX5_INLINE_MODE_NONE;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
+		*mlx5_mode = MLX5_INLINE_MODE_L2;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
+		*mlx5_mode = MLX5_INLINE_MODE_IP;
+		break;
+	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
+		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
+{
+	switch (mlx5_mode) {
+	case MLX5_INLINE_MODE_NONE:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
+		break;
+	case MLX5_INLINE_MODE_L2:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
+		break;
+	case MLX5_INLINE_MODE_IP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
+		break;
+	case MLX5_INLINE_MODE_TCP_UDP:
+		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
 {
 	struct mlx5_core_dev *dev;
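The two translators added above are meant to be exact inverses over the four supported modes. Here is a quick self-contained round-trip check of that property; the local enums are stand-ins for the real DEVLINK_ESWITCH_INLINE_MODE_* and MLX5_INLINE_MODE_* definitions, and the lookup tables mirror the two switch statements case for case.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* Local stand-ins for the devlink/mlx5 enum pairs mapped by the patch. */
enum { DL_NONE, DL_LINK, DL_NETWORK, DL_TRANSPORT };
enum { MLX5_NONE, MLX5_L2, MLX5_IP, MLX5_TCP_UDP };

/* devlink mode -> mlx5 mode, one row per case in esw_inline_mode_from_devlink() */
static const u8 dl_to_mlx5[] = {
	[DL_NONE]      = MLX5_NONE,
	[DL_LINK]      = MLX5_L2,
	[DL_NETWORK]   = MLX5_IP,
	[DL_TRANSPORT] = MLX5_TCP_UDP,
};

/* mlx5 mode -> devlink mode, one row per case in esw_inline_mode_to_devlink() */
static const u8 mlx5_to_dl[] = {
	[MLX5_NONE]    = DL_NONE,
	[MLX5_L2]      = DL_LINK,
	[MLX5_IP]      = DL_NETWORK,
	[MLX5_TCP_UDP] = DL_TRANSPORT,
};

int main(void)
{
	/* The two switch statements must be exact inverses over all four modes. */
	for (u8 m = DL_NONE; m <= DL_TRANSPORT; m++)
		assert(mlx5_to_dl[dl_to_mlx5[m]] == m);
	puts("round-trip OK");
	return 0;
}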
@@ -815,6 +867,95 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
 }
 
+int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+	int num_vports = esw->enabled_vports;
+	int err;
+	int vport;
+	u8 mlx5_mode;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
+	if (err)
+		goto out;
+
+	for (vport = 1; vport < num_vports; vport++) {
+		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
+		if (err) {
+			esw_warn(dev, "Failed to set min inline on vport %d\n",
+				 vport);
+			goto revert_inline_mode;
+		}
+	}
+
+	esw->offloads.inline_mode = mlx5_mode;
+	return 0;
+
+revert_inline_mode:
+	while (--vport > 0)
+		mlx5_modify_nic_vport_min_inline(dev,
+						 vport,
+						 esw->offloads.inline_mode);
+out:
+	return err;
+}
+
+int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
+{
+	struct mlx5_core_dev *dev = devlink_priv(devlink);
+	struct mlx5_eswitch *esw = dev->priv.eswitch;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
+}
+
+int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
+{
+	struct mlx5_core_dev *dev = esw->dev;
+	int vport;
+	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
+
+	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+		return -EOPNOTSUPP;
+
+	if (esw->mode == SRIOV_NONE)
+		return -EOPNOTSUPP;
+
+	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
+	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
+		return -EOPNOTSUPP;
+
+	for (vport = 1; vport <= nvfs; vport++) {
+		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
+		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
+			return -EINVAL;
+		prev_mlx5_mode = mlx5_mode;
+	}
+
+	*mode = mlx5_mode;
+	return 0;
+}
+
 void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
 				     int vport_index,
 				     struct mlx5_eswitch_rep *__rep)
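One detail worth calling out in mlx5_devlink_eswitch_inline_mode_set() above: on a failure at some vport, while (--vport > 0) walks back over exactly the vports already modified (skipping the one that failed, and stopping before vport 0, the PF) and restores the previously committed mode. Below is a standalone sketch of that partial-rollback pattern, assuming a hypothetical modify_vport() that fails on vport 3.

#include <stdio.h>

#define NUM_VPORTS 5

static int vport_mode[NUM_VPORTS]; /* hypothetical per-vport state */

/* Hypothetical modify call; fails for vport 3 to exercise the rollback. */
static int modify_vport(int vport, int mode)
{
	if (vport == 3)
		return -1;
	vport_mode[vport] = mode;
	return 0;
}

static int set_all_vports(int new_mode, int committed_mode)
{
	int vport, err = 0;

	for (vport = 1; vport < NUM_VPORTS; vport++) {
		err = modify_vport(vport, new_mode);
		if (err)
			goto revert;
	}
	return 0;

revert:
	/* --vport first skips the vport that failed, then stops before 0 (the PF). */
	while (--vport > 0)
		modify_vport(vport, committed_mode);
	return err;
}

int main(void)
{
	int err = set_all_vports(2, 1);

	printf("err=%d, vport1=%d, vport2=%d\n", err,
	       vport_mode[1], vport_mode[2]); /* both restored to 1 */
	return 0;
}

Note that the real function reverts to esw->offloads.inline_mode, the last committed value, since that field is only updated after every vport was modified successfully; this keeps the vports consistent with what a subsequent inline_mode_get reports.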