@@ -2325,6 +2325,274 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
23252325 mlx5_esw_offloads_devlink_port_unregister (esw , vport_num );
23262326}
23272327
2328+ static int esw_set_uplink_slave_ingress_root (struct mlx5_core_dev * master ,
2329+ struct mlx5_core_dev * slave )
2330+ {
2331+ u32 in [MLX5_ST_SZ_DW (set_flow_table_root_in )] = {};
2332+ u32 out [MLX5_ST_SZ_DW (set_flow_table_root_out )] = {};
2333+ struct mlx5_eswitch * esw ;
2334+ struct mlx5_flow_root_namespace * root ;
2335+ struct mlx5_flow_namespace * ns ;
2336+ struct mlx5_vport * vport ;
2337+ int err ;
2338+
2339+ MLX5_SET (set_flow_table_root_in , in , opcode ,
2340+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT );
2341+ MLX5_SET (set_flow_table_root_in , in , table_type , FS_FT_ESW_INGRESS_ACL );
2342+ MLX5_SET (set_flow_table_root_in , in , other_vport , 1 );
2343+ MLX5_SET (set_flow_table_root_in , in , vport_number , MLX5_VPORT_UPLINK );
2344+
2345+ if (master ) {
2346+ esw = master -> priv .eswitch ;
2347+ vport = mlx5_eswitch_get_vport (esw , MLX5_VPORT_UPLINK );
2348+ MLX5_SET (set_flow_table_root_in , in , table_of_other_vport , 1 );
2349+ MLX5_SET (set_flow_table_root_in , in , table_vport_number ,
2350+ MLX5_VPORT_UPLINK );
2351+
2352+ ns = mlx5_get_flow_vport_acl_namespace (master ,
2353+ MLX5_FLOW_NAMESPACE_ESW_INGRESS ,
2354+ vport -> index );
2355+ root = find_root (& ns -> node );
2356+ mutex_lock (& root -> chain_lock );
2357+
2358+ MLX5_SET (set_flow_table_root_in , in ,
2359+ table_eswitch_owner_vhca_id_valid , 1 );
2360+ MLX5_SET (set_flow_table_root_in , in ,
2361+ table_eswitch_owner_vhca_id ,
2362+ MLX5_CAP_GEN (master , vhca_id ));
2363+ MLX5_SET (set_flow_table_root_in , in , table_id ,
2364+ root -> root_ft -> id );
2365+ } else {
2366+ esw = slave -> priv .eswitch ;
2367+ vport = mlx5_eswitch_get_vport (esw , MLX5_VPORT_UPLINK );
2368+ ns = mlx5_get_flow_vport_acl_namespace (slave ,
2369+ MLX5_FLOW_NAMESPACE_ESW_INGRESS ,
2370+ vport -> index );
2371+ root = find_root (& ns -> node );
2372+ mutex_lock (& root -> chain_lock );
2373+ MLX5_SET (set_flow_table_root_in , in , table_id , root -> root_ft -> id );
2374+ }
2375+
2376+ err = mlx5_cmd_exec (slave , in , sizeof (in ), out , sizeof (out ));
2377+ mutex_unlock (& root -> chain_lock );
2378+
2379+ return err ;
2380+ }
2381+
2382+ static int esw_set_slave_root_fdb (struct mlx5_core_dev * master ,
2383+ struct mlx5_core_dev * slave )
2384+ {
2385+ u32 in [MLX5_ST_SZ_DW (set_flow_table_root_in )] = {};
2386+ u32 out [MLX5_ST_SZ_DW (set_flow_table_root_out )] = {};
2387+ struct mlx5_flow_root_namespace * root ;
2388+ struct mlx5_flow_namespace * ns ;
2389+ int err ;
2390+
2391+ MLX5_SET (set_flow_table_root_in , in , opcode ,
2392+ MLX5_CMD_OP_SET_FLOW_TABLE_ROOT );
2393+ MLX5_SET (set_flow_table_root_in , in , table_type ,
2394+ FS_FT_FDB );
2395+
2396+ if (master ) {
2397+ ns = mlx5_get_flow_namespace (master ,
2398+ MLX5_FLOW_NAMESPACE_FDB );
2399+ root = find_root (& ns -> node );
2400+ mutex_lock (& root -> chain_lock );
2401+ MLX5_SET (set_flow_table_root_in , in ,
2402+ table_eswitch_owner_vhca_id_valid , 1 );
2403+ MLX5_SET (set_flow_table_root_in , in ,
2404+ table_eswitch_owner_vhca_id ,
2405+ MLX5_CAP_GEN (master , vhca_id ));
2406+ MLX5_SET (set_flow_table_root_in , in , table_id ,
2407+ root -> root_ft -> id );
2408+ } else {
2409+ ns = mlx5_get_flow_namespace (slave ,
2410+ MLX5_FLOW_NAMESPACE_FDB );
2411+ root = find_root (& ns -> node );
2412+ mutex_lock (& root -> chain_lock );
2413+ MLX5_SET (set_flow_table_root_in , in , table_id ,
2414+ root -> root_ft -> id );
2415+ }
2416+
2417+ err = mlx5_cmd_exec (slave , in , sizeof (in ), out , sizeof (out ));
2418+ mutex_unlock (& root -> chain_lock );
2419+
2420+ return err ;
2421+ }
2422+
2423+ static int __esw_set_master_egress_rule (struct mlx5_core_dev * master ,
2424+ struct mlx5_core_dev * slave ,
2425+ struct mlx5_vport * vport ,
2426+ struct mlx5_flow_table * acl )
2427+ {
2428+ struct mlx5_flow_handle * flow_rule = NULL ;
2429+ struct mlx5_flow_destination dest = {};
2430+ struct mlx5_flow_act flow_act = {};
2431+ struct mlx5_flow_spec * spec ;
2432+ int err = 0 ;
2433+ void * misc ;
2434+
2435+ spec = kvzalloc (sizeof (* spec ), GFP_KERNEL );
2436+ if (!spec )
2437+ return - ENOMEM ;
2438+
2439+ spec -> match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS ;
2440+ misc = MLX5_ADDR_OF (fte_match_param , spec -> match_value ,
2441+ misc_parameters );
2442+ MLX5_SET (fte_match_set_misc , misc , source_port , MLX5_VPORT_UPLINK );
2443+ MLX5_SET (fte_match_set_misc , misc , source_eswitch_owner_vhca_id ,
2444+ MLX5_CAP_GEN (slave , vhca_id ));
2445+
2446+ misc = MLX5_ADDR_OF (fte_match_param , spec -> match_criteria , misc_parameters );
2447+ MLX5_SET_TO_ONES (fte_match_set_misc , misc , source_port );
2448+ MLX5_SET_TO_ONES (fte_match_set_misc , misc ,
2449+ source_eswitch_owner_vhca_id );
2450+
2451+ flow_act .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ;
2452+ dest .type = MLX5_FLOW_DESTINATION_TYPE_VPORT ;
2453+ dest .vport .num = slave -> priv .eswitch -> manager_vport ;
2454+ dest .vport .vhca_id = MLX5_CAP_GEN (slave , vhca_id );
2455+ dest .vport .flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID ;
2456+
2457+ flow_rule = mlx5_add_flow_rules (acl , spec , & flow_act ,
2458+ & dest , 1 );
2459+ if (IS_ERR (flow_rule ))
2460+ err = PTR_ERR (flow_rule );
2461+ else
2462+ vport -> egress .offloads .bounce_rule = flow_rule ;
2463+
2464+ kvfree (spec );
2465+ return err ;
2466+ }
2467+
2468+ static int esw_set_master_egress_rule (struct mlx5_core_dev * master ,
2469+ struct mlx5_core_dev * slave )
2470+ {
2471+ int inlen = MLX5_ST_SZ_BYTES (create_flow_group_in );
2472+ struct mlx5_eswitch * esw = master -> priv .eswitch ;
2473+ struct mlx5_flow_table_attr ft_attr = {
2474+ .max_fte = 1 , .prio = 0 , .level = 0 ,
2475+ };
2476+ struct mlx5_flow_namespace * egress_ns ;
2477+ struct mlx5_flow_table * acl ;
2478+ struct mlx5_flow_group * g ;
2479+ struct mlx5_vport * vport ;
2480+ void * match_criteria ;
2481+ u32 * flow_group_in ;
2482+ int err ;
2483+
2484+ vport = mlx5_eswitch_get_vport (esw , esw -> manager_vport );
2485+ if (IS_ERR (vport ))
2486+ return PTR_ERR (vport );
2487+
2488+ egress_ns = mlx5_get_flow_vport_acl_namespace (master ,
2489+ MLX5_FLOW_NAMESPACE_ESW_EGRESS ,
2490+ vport -> index );
2491+ if (!egress_ns )
2492+ return - EINVAL ;
2493+
2494+ if (vport -> egress .acl )
2495+ return - EINVAL ;
2496+
2497+ flow_group_in = kvzalloc (inlen , GFP_KERNEL );
2498+ if (!flow_group_in )
2499+ return - ENOMEM ;
2500+
2501+ acl = mlx5_create_vport_flow_table (egress_ns , & ft_attr , vport -> vport );
2502+ if (IS_ERR (acl )) {
2503+ err = PTR_ERR (acl );
2504+ goto out ;
2505+ }
2506+
2507+ match_criteria = MLX5_ADDR_OF (create_flow_group_in , flow_group_in ,
2508+ match_criteria );
2509+ MLX5_SET_TO_ONES (fte_match_param , match_criteria ,
2510+ misc_parameters .source_port );
2511+ MLX5_SET_TO_ONES (fte_match_param , match_criteria ,
2512+ misc_parameters .source_eswitch_owner_vhca_id );
2513+ MLX5_SET (create_flow_group_in , flow_group_in , match_criteria_enable ,
2514+ MLX5_MATCH_MISC_PARAMETERS );
2515+
2516+ MLX5_SET (create_flow_group_in , flow_group_in ,
2517+ source_eswitch_owner_vhca_id_valid , 1 );
2518+ MLX5_SET (create_flow_group_in , flow_group_in , start_flow_index , 0 );
2519+ MLX5_SET (create_flow_group_in , flow_group_in , end_flow_index , 0 );
2520+
2521+ g = mlx5_create_flow_group (acl , flow_group_in );
2522+ if (IS_ERR (g )) {
2523+ err = PTR_ERR (g );
2524+ goto err_group ;
2525+ }
2526+
2527+ err = __esw_set_master_egress_rule (master , slave , vport , acl );
2528+ if (err )
2529+ goto err_rule ;
2530+
2531+ vport -> egress .acl = acl ;
2532+ vport -> egress .offloads .bounce_grp = g ;
2533+
2534+ kvfree (flow_group_in );
2535+
2536+ return 0 ;
2537+
2538+ err_rule :
2539+ mlx5_destroy_flow_group (g );
2540+ err_group :
2541+ mlx5_destroy_flow_table (acl );
2542+ out :
2543+ kvfree (flow_group_in );
2544+ return err ;
2545+ }
2546+
2547+ static void esw_unset_master_egress_rule (struct mlx5_core_dev * dev )
2548+ {
2549+ struct mlx5_vport * vport ;
2550+
2551+ vport = mlx5_eswitch_get_vport (dev -> priv .eswitch ,
2552+ dev -> priv .eswitch -> manager_vport );
2553+
2554+ esw_acl_egress_ofld_cleanup (vport );
2555+ }
2556+
2557+ int mlx5_eswitch_offloads_config_single_fdb (struct mlx5_eswitch * master_esw ,
2558+ struct mlx5_eswitch * slave_esw )
2559+ {
2560+ int err ;
2561+
2562+ err = esw_set_uplink_slave_ingress_root (master_esw -> dev ,
2563+ slave_esw -> dev );
2564+ if (err )
2565+ return - EINVAL ;
2566+
2567+ err = esw_set_slave_root_fdb (master_esw -> dev ,
2568+ slave_esw -> dev );
2569+ if (err )
2570+ goto err_fdb ;
2571+
2572+ err = esw_set_master_egress_rule (master_esw -> dev ,
2573+ slave_esw -> dev );
2574+ if (err )
2575+ goto err_acl ;
2576+
2577+ return err ;
2578+
2579+ err_acl :
2580+ esw_set_slave_root_fdb (NULL , slave_esw -> dev );
2581+
2582+ err_fdb :
2583+ esw_set_uplink_slave_ingress_root (NULL , slave_esw -> dev );
2584+
2585+ return err ;
2586+ }
2587+
2588+ void mlx5_eswitch_offloads_destroy_single_fdb (struct mlx5_eswitch * master_esw ,
2589+ struct mlx5_eswitch * slave_esw )
2590+ {
2591+ esw_unset_master_egress_rule (master_esw -> dev );
2592+ esw_set_slave_root_fdb (NULL , slave_esw -> dev );
2593+ esw_set_uplink_slave_ingress_root (NULL , slave_esw -> dev );
2594+ }
2595+
23282596#define ESW_OFFLOADS_DEVCOM_PAIR (0)
23292597#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
23302598
@@ -2674,6 +2942,31 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
26742942 esw_vport_destroy_offloads_acl_tables (esw , vport );
26752943}
26762944
/* Reload all currently-loaded representors of @esw.
 *
 * No-op (returns 0) unless the eswitch exists and is in offloads mode and
 * the uplink rep is loaded.  The uplink rep is reloaded first; if that
 * fails its error is returned.  The remaining loaded reps are then
 * reloaded best-effort — per-rep errors are deliberately ignored.
 *
 * NOTE(review): reps reached here are already in REP_LOADED state, so this
 * presumably relies on mlx5_esw_offloads_rep_load() handling an
 * already-loaded rep gracefully (and on the uplink rep not being loaded
 * twice by the loop) — confirm against that function's state handling.
 */
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int ret;

	if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	/* Nothing to reload if the uplink rep isn't up. */
	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
	if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
		return 0;

	ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
	if (ret)
		return ret;

	/* Best-effort reload of every other loaded rep; errors ignored. */
	mlx5_esw_for_each_rep(esw, i, rep) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
			mlx5_esw_offloads_rep_load(esw, rep->vport);
	}

	return 0;
}
2969+
26772970static int esw_offloads_steering_init (struct mlx5_eswitch * esw )
26782971{
26792972 struct mlx5_esw_indir_table * indir ;