@@ -1297,17 +1297,19 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 		    esw->mode == MLX5_ESWITCH_LEGACY;
 
 	/* Enable PF vport */
-	if (pf_needed) {
+	if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
 		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
 						    enabled_events);
 		if (ret)
 			return ret;
 	}
 
-	/* Enable external host PF HCA */
-	ret = host_pf_enable_hca(esw->dev);
-	if (ret)
-		goto pf_hca_err;
+	if (mlx5_esw_host_functions_enabled(esw->dev)) {
+		/* Enable external host PF HCA */
+		ret = host_pf_enable_hca(esw->dev);
+		if (ret)
+			goto pf_hca_err;
+	}
 
 	/* Enable ECPF vport */
 	if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -1339,9 +1341,10 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 	if (mlx5_ecpf_vport_exists(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 ecpf_err:
-	host_pf_disable_hca(esw->dev);
+	if (mlx5_esw_host_functions_enabled(esw->dev))
+		host_pf_disable_hca(esw->dev);
 pf_hca_err:
-	if (pf_needed)
+	if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 	return ret;
 }
@@ -1361,10 +1364,12 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 	}
 
-	host_pf_disable_hca(esw->dev);
+	if (mlx5_esw_host_functions_enabled(esw->dev))
+		host_pf_disable_hca(esw->dev);
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
-	    esw->mode == MLX5_ESWITCH_LEGACY)
+	if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+	     esw->mode == MLX5_ESWITCH_LEGACY) &&
+	    mlx5_esw_host_functions_enabled(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 }
 
@@ -1693,7 +1698,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
 	void *hca_caps;
 	int err;
 
-	if (!mlx5_core_is_ecpf(dev)) {
+	if (!mlx5_core_is_ecpf(dev) ||
+	    !mlx5_esw_host_functions_enabled(dev)) {
 		*max_sfs = 0;
 		return 0;
 	}
@@ -1769,21 +1775,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
 
 	xa_init(&esw->vports);
 
-	err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
-	if (err)
-		goto err;
-	if (esw->first_host_vport == MLX5_VPORT_PF)
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
-	idx++;
-
-	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
-		err = mlx5_esw_vport_alloc(esw, idx, idx);
+	if (mlx5_esw_host_functions_enabled(dev)) {
+		err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
 		if (err)
 			goto err;
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+		if (esw->first_host_vport == MLX5_VPORT_PF)
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
 		idx++;
+		for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+			err = mlx5_esw_vport_alloc(esw, idx, idx);
+			if (err)
+				goto err;
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+			idx++;
+		}
 	}
+
 	base_sf_num = mlx5_sf_start_function_id(dev);
 	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
 		err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1883,6 +1891,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto free_esw;
 
 	esw->dev = dev;
+	dev->priv.eswitch = esw;
 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
 	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
 
@@ -1901,7 +1910,6 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 	if (err)
 		goto abort;
 
-	dev->priv.eswitch = esw;
 	err = esw_offloads_init(esw);
 	if (err)
 		goto reps_err;
@@ -2433,3 +2441,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
 	dev->num_ipsec_offloads--;
 	mutex_unlock(&esw->state_lock);
 }
+
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+	if (!dev->priv.eswitch)
+		return true;
+
+	return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}
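For reference, a minimal sketch of the matching declaration for the new helper, assuming it would sit alongside the other eswitch helpers in eswitch.h; the header location is an assumption and is not part of the diff above, while the signature is taken directly from the definition added in the last hunk:

/* Assumed declaration (hypothetical placement in eswitch.h).
 * The definition added above reports whether host PF/VF functions are
 * enabled and defaults to true while no eswitch instance exists yet.
 */
bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev);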