@@ -1038,6 +1038,25 @@ const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
 		return ERR_PTR(err);
 }
 
+static int mlx5_esw_host_functions_enabled_query(struct mlx5_eswitch *esw)
+{
+	const u32 *query_host_out;
+
+	if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
+		return 0;
+
+	query_host_out = mlx5_esw_query_functions(esw->dev);
+	if (IS_ERR(query_host_out))
+		return PTR_ERR(query_host_out);
+
+	esw->esw_funcs.host_funcs_disabled =
+		MLX5_GET(query_esw_functions_out, query_host_out,
+			 host_params_context.host_pf_not_exist);
+
+	kvfree(query_host_out);
+	return 0;
+}
+
 static void mlx5_eswitch_event_handler_register(struct mlx5_eswitch *esw)
 {
 	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
@@ -1278,17 +1297,19 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 		    esw->mode == MLX5_ESWITCH_LEGACY;
 
 	/* Enable PF vport */
-	if (pf_needed) {
+	if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev)) {
 		ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
 						    enabled_events);
 		if (ret)
 			return ret;
 	}
 
-	/* Enable external host PF HCA */
-	ret = host_pf_enable_hca(esw->dev);
-	if (ret)
-		goto pf_hca_err;
+	if (mlx5_esw_host_functions_enabled(esw->dev)) {
+		/* Enable external host PF HCA */
+		ret = host_pf_enable_hca(esw->dev);
+		if (ret)
+			goto pf_hca_err;
+	}
 
 	/* Enable ECPF vport */
 	if (mlx5_ecpf_vport_exists(esw->dev)) {
@@ -1320,9 +1341,10 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
 	if (mlx5_ecpf_vport_exists(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 ecpf_err:
-	host_pf_disable_hca(esw->dev);
+	if (mlx5_esw_host_functions_enabled(esw->dev))
+		host_pf_disable_hca(esw->dev);
 pf_hca_err:
-	if (pf_needed)
+	if (pf_needed && mlx5_esw_host_functions_enabled(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 	return ret;
 }
@@ -1342,10 +1364,12 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_ECPF);
 	}
 
-	host_pf_disable_hca(esw->dev);
+	if (mlx5_esw_host_functions_enabled(esw->dev))
+		host_pf_disable_hca(esw->dev);
 
-	if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
-	    esw->mode == MLX5_ESWITCH_LEGACY)
+	if ((mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+	     esw->mode == MLX5_ESWITCH_LEGACY) &&
+	    mlx5_esw_host_functions_enabled(esw->dev))
 		mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
 }
 
@@ -1674,7 +1698,8 @@ int mlx5_esw_sf_max_hpf_functions(struct mlx5_core_dev *dev, u16 *max_sfs, u16 *
 	void *hca_caps;
 	int err;
 
-	if (!mlx5_core_is_ecpf(dev)) {
+	if (!mlx5_core_is_ecpf(dev) ||
+	    !mlx5_esw_host_functions_enabled(dev)) {
 		*max_sfs = 0;
 		return 0;
 	}
@@ -1750,21 +1775,23 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
 
 	xa_init(&esw->vports);
 
-	err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
-	if (err)
-		goto err;
-	if (esw->first_host_vport == MLX5_VPORT_PF)
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
-	idx++;
-
-	for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
-		err = mlx5_esw_vport_alloc(esw, idx, idx);
+	if (mlx5_esw_host_functions_enabled(dev)) {
+		err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_PF);
 		if (err)
 			goto err;
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
-		xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+		if (esw->first_host_vport == MLX5_VPORT_PF)
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
 		idx++;
+		for (i = 0; i < mlx5_core_max_vfs(dev); i++) {
+			err = mlx5_esw_vport_alloc(esw, idx, idx);
+			if (err)
+				goto err;
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_VF);
+			xa_set_mark(&esw->vports, idx, MLX5_ESW_VPT_HOST_FN);
+			idx++;
+		}
 	}
+
 	base_sf_num = mlx5_sf_start_function_id(dev);
 	for (i = 0; i < mlx5_sf_max_functions(dev); i++) {
 		err = mlx5_esw_vport_alloc(esw, idx, base_sf_num + i);
@@ -1864,6 +1891,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto free_esw;
 
 	esw->dev = dev;
+	dev->priv.eswitch = esw;
 	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
 	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
 
@@ -1874,11 +1902,14 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
+	err = mlx5_esw_host_functions_enabled_query(esw);
+	if (err)
+		goto abort;
+
 	err = mlx5_esw_vports_init(esw);
 	if (err)
 		goto abort;
 
-	dev->priv.eswitch = esw;
 	err = esw_offloads_init(esw);
 	if (err)
 		goto reps_err;
@@ -2410,3 +2441,11 @@ void mlx5_eswitch_unblock_ipsec(struct mlx5_core_dev *dev)
 	dev->num_ipsec_offloads--;
 	mutex_unlock(&esw->state_lock);
 }
+
+bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
+{
+	if (!dev->priv.eswitch)
+		return true;
+
+	return !dev->priv.eswitch->esw_funcs.host_funcs_disabled;
+}
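The pattern above is easy to see in miniature: the patch caches the firmware's host_pf_not_exist bit once at eswitch init, and every host-PF/VF path is then guarded by a helper that defaults to "enabled" whenever dev->priv.eswitch has not been set up yet (which is also why the patch moves the dev->priv.eswitch assignment earlier in mlx5_eswitch_init()). What follows is a minimal standalone C sketch of that gating logic, not driver code: the function names mirror the patch, but the surrounding structs are simplified stand-ins rather than the real mlx5 types.

/* Standalone sketch of the host-functions gating introduced by this patch.
 * Assumed/simplified: struct layouts below are illustrative stand-ins. */
#include <stdbool.h>
#include <stdio.h>

struct esw_functions {
	bool host_funcs_disabled;	/* cached host_pf_not_exist bit */
};

struct mlx5_eswitch {
	struct esw_functions esw_funcs;
};

struct mlx5_core_dev {
	struct mlx5_eswitch *eswitch;	/* stands in for dev->priv.eswitch */
};

/* Same default as the patch: before the eswitch exists, report host
 * functions as enabled so early init paths are not blocked. */
static bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev)
{
	if (!dev->eswitch)
		return true;
	return !dev->eswitch->esw_funcs.host_funcs_disabled;
}

int main(void)
{
	struct mlx5_core_dev dev = { .eswitch = NULL };
	struct mlx5_eswitch esw = {
		.esw_funcs = { .host_funcs_disabled = true },
	};

	/* No eswitch yet: helper must not gate anything off. */
	printf("pre-init:  enabled=%d\n", mlx5_esw_host_functions_enabled(&dev));

	/* After init on a device whose host PF does not exist, the same
	 * helper now causes host-PF enable/disable paths to be skipped. */
	dev.eswitch = &esw;
	printf("post-init: enabled=%d\n", mlx5_esw_host_functions_enabled(&dev));
	return 0;
}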