Skip to content

Commit 45dd800

Browse files
committed
Fix the configuration to find BLAS and decide whether to build the CPU or GPU architecture. Define this in RConfigure.h
1 parent 4eac0a6 commit 45dd800

8 files changed

+43
-11
lines changed

cmake/modules/RootBuildOptions.cmake

+2
Original file line numberDiff line numberDiff line change
@@ -167,6 +167,8 @@ ROOT_BUILD_OPTION(table OFF "Build libTable contrib library")
167167
ROOT_BUILD_OPTION(tcmalloc OFF "Using the tcmalloc allocator")
168168
ROOT_BUILD_OPTION(thread ON "Using thread library (cannot be disabled)")
169169
ROOT_BUILD_OPTION(tmva ON "Build TMVA multi variate analysis library")
170+
ROOT_BUILD_OPTION(tmva-cpu ON "Build TMVA with CPU support for deep learning. Requires BLAS")
171+
ROOT_BUILD_OPTION(tmva-gpu ON "Build TMVA with GPU support for deep learning. Requires CUDA")
170172
ROOT_BUILD_OPTION(unuran OFF "UNURAN - package for generating non-uniform random numbers")
171173
ROOT_BUILD_OPTION(vc OFF "Vc adds a few new types for portable and intuitive SIMD programming")
172174
ROOT_BUILD_OPTION(vdt OFF "VDT adds a set of fast and vectorisable mathematical functions")

cmake/modules/RootConfiguration.cmake

+11
Original file line numberDiff line numberDiff line change
@@ -565,6 +565,17 @@ if(qt5web)
565565
else()
566566
set(hasqt5webengine undef)
567567
endif()
568+
if (tmva AND imt AND BLAS_FOUND)
569+
set(hastmvacpu define)
570+
else()
571+
set(hastmvacpu undef)
572+
endif()
573+
if (tmva AND CUDA_FOUND)
574+
set(hastmvagpu define)
575+
else()
576+
set(hastmvagpu undef)
577+
endif()
578+
568579

569580
CHECK_CXX_SOURCE_COMPILES("#include <string_view>
570581
int main() { char arr[3] = {'B', 'a', 'r'}; std::string_view strv(arr, sizeof(arr)); return 0;}" found_stdstringview)

cmake/modules/SearchInstalledSoftware.cmake

+8
Original file line numberDiff line numberDiff line change
@@ -1513,6 +1513,7 @@ if(tmva AND cuda)
15131513
message(STATUS "CUDA not found. Ensure that the installation of CUDA is in the CMAKE_PREFIX_PATH")
15141514
message(STATUS " For the time being switching OFF 'cuda' option")
15151515
set(cuda OFF CACHE BOOL "" FORCE)
1516+
set(tmva-gpu OFF CACHE BOOL "" FORCE)
15161517
endif()
15171518
endif()
15181519
endif()
@@ -1522,6 +1523,13 @@ if(tmva AND imt)
15221523
find_package(BLAS)
15231524
endif()
15241525

1526+
if(NOT BLAS_FOUND)
1527+
set(tmva-cpu OFF CACHE BOOL "" FORCE)
1528+
endif()
1529+
if(NOT CUDA_FOUND)
1530+
set(tmva-gpu OFF CACHE BOOL "" FORCE)
1531+
endif()
1532+
15251533

15261534
#---Download googletest--------------------------------------------------------------
15271535
if (testing)

config/RConfigure.in

+2
Original file line numberDiff line numberDiff line change
@@ -55,5 +55,7 @@
5555
#@usezlib@ R__HAS_DEFAULT_ZLIB /**/
5656
#@uselzma@ R__HAS_DEFAULT_LZMA /**/
5757

58+
#@hastmvacpu@ R__HAS_TMVACPU /**/
59+
#@hastmvagpu@ R__HAS_TMVAGPU /**/
5860

5961
#endif

test/stressTMVA.cxx

+2-2
Original file line numberDiff line numberDiff line change
@@ -3049,12 +3049,12 @@ void addClassificationTests( UnitTestSuite& TMVA_test, bool full=true)
30493049
TString configCpu = "Architecture=CPU:" + config;
30503050
TString configGpu = "Architecture=GPU:" + config;
30513051

3052-
#ifdef DNNCPU
3052+
#ifdef R__HAS_TMVACPU
30533053
TMVA_test.addTest(new MethodUnitTestWithROCLimits(
30543054
TMVA::Types::kDNN, "DNN CPU", configCpu, 0.85, 0.98)
30553055
);
30563056
#endif
3057-
#ifdef DNNCUDA
3057+
#ifdef R__HAS_TMVAGPU
30583058
TMVA_test.addTest(new MethodUnitTestWithROCLimits(
30593059
TMVA::Types::kDNN, "DNN GPU", configGpu, 0.85, 0.98)
30603060
);

tmva/tmva/CMakeLists.txt

+2-2
Original file line numberDiff line numberDiff line change
@@ -73,14 +73,14 @@ endif()
7373
#---Handle BLAS dependent code. -----------------
7474
if(BLAS_FOUND AND imt)
7575
message(STATUS "Using TMVA-DNN with BLAS installation")
76-
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNNCPU")
76+
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
7777
set(DNN_CPU_LIBRARIES MathCore Matrix ${BLAS_LINKER_FLAGS} ${BLAS_LIBRARIES} ${TBB_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
7878
include_directories(SYSTEM ${TBB_INCLUDE_DIRS})
7979
else()
8080
if (mathmore AND imt)
8181
#use GSL cblas installation
8282
message(STATUS "Using TMVA-DNN with gslcblas installation")
83-
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNNCPU -DDNN_USE_CBLAS")
83+
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DDNN_USE_CBLAS")
8484
set(DNN_CPU_LIBRARIES MathCore Matrix ${TBB_LIBRARIES} ${GSL_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
8585
include_directories(SYSTEM ${TBB_INCLUDE_DIRS} ${GSL_INCLUDE_DIR} )
8686
else()

tmva/tmva/inc/TMVA/MethodDL.h

+9-7
Original file line numberDiff line numberDiff line change
@@ -43,11 +43,11 @@
4343

4444
#include "TMVA/DNN/Architectures/Reference.h"
4545

46-
#ifdef DNNCPU
46+
#ifdef R__HAS_TMVACPU
4747
#include "TMVA/DNN/Architectures/Cpu.h"
4848
#endif
4949

50-
#ifdef DNNCUDA
50+
#ifdef R__HAS_TMVAGPU
5151
#include "TMVA/DNN/Architectures/Cuda.h"
5252
#endif
5353

@@ -77,13 +77,13 @@ class MethodDL : public MethodBase {
7777
private:
7878
// Key-Value vector type, containing the values for the training options
7979
using KeyValueVector_t = std::vector<std::map<TString, TString>>;
80-
#ifdef DNNCPU
81-
using ArchitectureCpu_t = TMVA::DNN::TCpu<Double_t>;
80+
#ifdef R__HAS_TMVACPU
81+
using ArchitectureImpl_t = TMVA::DNN::TCpu<Double_t>;
8282
#else
83-
using ArchitectureCpu_t = TMVA::DNN::TReference<Double_t>;
83+
using ArchitectureImpl_t = TMVA::DNN::TReference<Double_t>;
8484
#endif
85-
using DeepNetCpu_t = TMVA::DNN::TDeepNet<ArchitectureCpu_t>;
86-
std::unique_ptr<DeepNetCpu_t> fNet;
85+
using DeepNetImpl_t = TMVA::DNN::TDeepNet<ArchitectureImpl_t>;
86+
std::unique_ptr<DeepNetImpl_t> fNet;
8787

8888
/*! The option handling methods */
8989
void DeclareOptions();
@@ -199,6 +199,8 @@ class MethodDL : public MethodBase {
199199
size_t GetBatchHeight() const { return fBatchHeight; }
200200
size_t GetBatchWidth() const { return fBatchWidth; }
201201

202+
const DeepNetImpl_t & GetDeepNet() const { return *fNet; }
203+
202204
DNN::EInitialization GetWeightInitialization() const { return fWeightInitialization; }
203205
DNN::EOutputFunction GetOutputFunction() const { return fOutputFunction; }
204206
DNN::ELossFunction GetLossFunction() const { return fLossFunction; }

tmva/tmva/inc/TMVA/MethodDNN.h

+7
Original file line numberDiff line numberDiff line change
@@ -52,6 +52,13 @@
5252
#include "TMVA/DNN/Minimizers.h"
5353
#include "TMVA/DNN/Architectures/Reference.h"
5454

55+
#ifdef R__HAS_TMVACPU
56+
#define DNNCPU
57+
#endif
58+
#ifdef R__HAS_TMVAGPU
59+
#define DNNCUDA
60+
#endif
61+
5562
#ifdef DNNCPU
5663
#include "TMVA/DNN/Architectures/Cpu.h"
5764
#endif

0 commit comments

Comments
 (0)