Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

aterko-ConvNet_and_RNN_Benchmarks #235

Open
wants to merge 5 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 17 additions & 2 deletions root/tmva/tmva/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,28 @@ if(ROOT_tmva_FOUND AND ROOT_tmva-cpu_FOUND AND ROOT_imt_FOUND)
RB_ADD_GBENCHMARK(ConvNetCpuBenchmarks
ConvNetCpuBenchmarks.cxx
LABEL short
LIBRARIES Core Tree MathCore RIO Hist TMVA)
LIBRARIES Core Tree MathCore RIO Hist TMVA PyMVA)
endif()

if (ROOT_cuda_FOUND AND ROOT_tmva-gpu_FOUND)
set(DNN_CUDA_LIBRARIES ${CUDA_CUBLAS_LIBRARIES} ${CUDNN_LIBRARIES})
RB_ADD_GBENCHMARK(ConvNetCudaBenchmarks
ConvNetCudaBenchmarks.cxx
LABEL short
LIBRARIES Core Tree MathCore RIO Hist TMVA ${DNN_CUDA_LIBRARIES})
LIBRARIES Core Tree MathCore RIO Hist TMVA PyMVA ${DNN_CUDA_LIBRARIES})
endif()

if(ROOT_tmva_FOUND AND ROOT_tmva-cpu_FOUND AND ROOT_imt_FOUND)
RB_ADD_GBENCHMARK(RNNCpuBenchmarks
RNNCpuBenchmarks.cxx
LABEL short
LIBRARIES Core Tree MathCore Gpad RIO Hist TMVA PyMVA)
endif()

if (ROOT_cuda_FOUND AND ROOT_tmva-gpu_FOUND)
set(DNN_CUDA_LIBRARIES ${CUDA_CUBLAS_LIBRARIES} ${CUDNN_LIBRARIES})
RB_ADD_GBENCHMARK(RNNCudaBenchmarks
RNNCudaBenchmarks.cxx
LABEL short
LIBRARIES Core Tree MathCore Gpad RIO Hist TMVA PyMVA ${DNN_CUDA_LIBRARIES})
endif()
121 changes: 120 additions & 1 deletion root/tmva/tmva/ConvNetBenchmarks.h
Original file line number Diff line number Diff line change
@@ -1,9 +1,12 @@
#include "TMVA/Factory.h"
#include "TMVA/DataLoader.h"
#include "TMVA/Config.h"
#include "TMVA/PyMethodBase.h"
#include "TMVA/DataSetInfo.h"
#include "TFile.h"
#include "TTree.h"
#include "TMacro.h"
#include "TSystem.h"
#include "MakeImageData.h"

void CNN_benchmark(TString archName) {
Expand Down Expand Up @@ -115,4 +118,120 @@ void CNN_benchmark(TString archName) {
factory->TestAllMethods();

outputFile->Close();
}
}


/// Trains and tests a small convolutional Keras model through the TMVA
/// PyKeras interface on toy Gaussian "image" data, as a benchmark workload.
///
/// \param archName  "CPU" hides all CUDA devices so TensorFlow runs on the
///                  CPU; any other value (e.g. "GPU") leaves them visible.
void CNN_Keras_benchmark(TString archName) {

   int ntrainEvts = 500;
   int ntestEvts = 500;

   // Image dimensions: each event carries nx*ny pixel variables.
   size_t nx = 32;
   size_t ny = 32;

   ROOT::EnableImplicitMT(0);
   TMVA::Config::Instance();

   // for using Keras: pin TensorFlow to the CPU when requested and start
   // the embedded Python interpreter before any PyMVA call.
   if (archName == "CPU") gSystem->Setenv("CUDA_VISIBLE_DEVICES","-1");
   gSystem->Setenv("KERAS_BACKEND", "tensorflow");
   TMVA::PyMethodBase::PyInitialize();

   // Load the input data
   TString fname = "imagesData.root";
   TString fopt = "CACHEREAD";

   // Make some Gaussian Images.
   makeImages(ntrainEvts + ntestEvts, nx, ny);

   auto input = TFile::Open(fname, fopt);

   R__ASSERT(input);

   std::cout << "--- Classification : Using input file: " << input->GetName() << std::endl;

   // Create a ROOT output file where TMVA will store ntuples, histograms, etc.
   TString outfileName( "output.root" );
   TFile* outputFile = TFile::Open(outfileName, "RECREATE");

   // Creating the factory object
   TMVA::Factory *factory = new TMVA::Factory( "TMVAClassification", outputFile,
                                               "!Correlations:!V:!Silent:Color:DrawProgressBar:"
                                               "AnalysisType=Classification:!ModelPersistence:Transformations=None" );
   TMVA::DataLoader *dataloader = new TMVA::DataLoader("dataset_cnn_ecal");

   TTree *signalTree = (TTree*)input->Get("sgn");
   TTree *background = (TTree*)input->Get("bkg");

   // Register one float variable per pixel (var0 .. var{nx*ny-1}).
   for (size_t j = 0; j < nx * ny; ++j) {
      TString varName = TString::Format("var%zu", j);
      dataloader->AddVariable(varName, 'F');
   }

   dataloader->AddSignalTree    ( signalTree, 1.0 );
   dataloader->AddBackgroundTree( background, 1.0 );

   // check given input
   auto & datainfo = dataloader->GetDataSetInfo();
   auto vars = datainfo.GetListOfVariables();
   std::cout << "number of variables is " << vars.size() << std::endl;

   TString trainAndTestOpt = TString::Format("nTrain_Signal=%d:nTrain_Background=%d:nTest_Signal=%d:nTest_Background=%d:"
                                             "SplitMode=Random:NormMode=NumEvents:!V",
                                             ntrainEvts, ntrainEvts, ntestEvts, ntestEvts);
   TCut mycuts = "";
   TCut mycutb = "";
   dataloader->PrepareTrainingAndTestTree(mycuts, mycutb, trainAndTestOpt);

   // Force the dataset build now, with TMVA's logging suppressed so the
   // benchmark output stays readable.
   TMVA::MsgLogger::InhibitOutput();
   dataloader->GetDefaultDataSetInfo().GetDataSet();
   TMVA::MsgLogger::EnableOutput();

   std::cout << "prepared DATA LOADER " << std::endl;

   Info("TMVA_CNN_Classification", "Building convolutional keras model");

   // Generate the Keras model definition as a small Python script, run it,
   // and let PyKeras pick up the saved .h5 file.
   TMacro m;
   m.AddLine("import tensorflow");
   m.AddLine("from tensorflow.keras.models import Sequential");
   m.AddLine("from tensorflow.keras.optimizers import Adam");
   m.AddLine("from tensorflow.keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Reshape");
   m.AddLine("");
   m.AddLine("model = Sequential() ");
   m.AddLine("model.add(Reshape((32, 32, 1), input_shape = (1024, )))");
   m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
             "'relu', padding = 'same'))");
   m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
             "'relu', padding = 'same'))");
   m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2))) ");

   m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
             "'relu', padding = 'same'))");
   m.AddLine("model.add(Conv2D(12, kernel_size = (3, 3), kernel_initializer = 'glorot_normal',activation = "
             "'relu', padding = 'same'))");
   m.AddLine("model.add(MaxPooling2D(pool_size = (2, 2), strides = (2,2))) ");

   m.AddLine("model.add(Flatten())");
   m.AddLine("model.add(Dense(64, activation = 'relu')) ");
   m.AddLine("model.add(Dense(32, activation = 'relu')) ");
   m.AddLine("model.add(Dense(1, activation = 'linear')) ");

   // NOTE: 'learning_rate' replaces the deprecated 'lr' keyword, which was
   // removed from tf.keras optimizers in recent TensorFlow releases.
   m.AddLine("model.compile(loss = 'binary_crossentropy', optimizer = Adam(learning_rate = 0.001), metrics = ['accuracy'])");
   m.AddLine("model.save('model_cnn.h5')");
   m.AddLine("model.summary()");

   m.SaveSource("make_cnn_model.py");
   gSystem->Exec("python make_cnn_model.py");

   factory->BookMethod(
      dataloader, TMVA::Types::kPyKeras, "PyKeras",
      "H:!V:VarTransform=None:FilenameModel=model_cnn.h5:tf.keras:"
      "FilenameTrainedModel=trained_model_cnn.h5:NumEpochs=10:BatchSize=32:"
      "GpuOptions=allow_growth=True");

   factory->TrainAllMethods();
   factory->TestAllMethods();

   outputFile->Close();

   // Release heap objects and the input file: this function runs once per
   // benchmark iteration, so leaking them compounds across the run.
   delete factory;
   delete dataloader;
   delete outputFile;
   input->Close();
   delete input;
}
13 changes: 12 additions & 1 deletion root/tmva/tmva/ConvNetCpuBenchmarks.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -12,4 +12,15 @@ static void BM_ConvolutionalNetwork_CPU(benchmark::State &state)
}
BENCHMARK(BM_ConvolutionalNetwork_CPU);

BENCHMARK_MAIN();
// Google-benchmark driver: times the full TMVA PyKeras CNN classification
// workload (data generation, training, testing) on the CPU backend.
static void BM_ConvolutionalNetwork_Keras_CPU(benchmark::State &state)
{
   const TString arch = "CPU";

   // Each iteration runs the complete benchmark workload once.
   for (auto _ : state)
      CNN_Keras_benchmark(arch);
}
BENCHMARK(BM_ConvolutionalNetwork_Keras_CPU);

BENCHMARK_MAIN();
11 changes: 11 additions & 0 deletions root/tmva/tmva/ConvNetCudaBenchmarks.cxx
Original file line number Diff line number Diff line change
Expand Up @@ -12,4 +12,15 @@ static void BM_ConvolutionalNetwork_CUDA(benchmark::State &state)
}
BENCHMARK(BM_ConvolutionalNetwork_CUDA);

// Google-benchmark driver: times the full TMVA PyKeras CNN classification
// workload (data generation, training, testing) with GPU devices visible.
static void BM_ConvolutionalNetwork_Keras_CUDA(benchmark::State &state)
{
   const TString arch = "GPU";

   // Each iteration runs the complete benchmark workload once.
   for (auto _ : state)
      CNN_Keras_benchmark(arch);
}
BENCHMARK(BM_ConvolutionalNetwork_Keras_CUDA);

BENCHMARK_MAIN();
Loading