From 1af154557984d8b0f563e059369a2a5647d1934f Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Sun, 30 Nov 2025 17:19:06 +0100 Subject: [PATCH 1/6] [tmva][pymva] Move some files back from sofie/tests The disentanglement of PyMVA and SOFIE accidentally moved too many test files. Follows up on 178a9f9c9f7b, which should have only moved `generatePyTorchModels.py`, but it also moved other files with similar names that are used for PyMVA tests. --- tmva/pymva/test/CMakeLists.txt | 4 ++++ .../test/generatePyTorchModelClassification.py | 0 tmva/{sofie => pymva}/test/generatePyTorchModelMulticlass.py | 0 tmva/{sofie => pymva}/test/generatePyTorchModelRegression.py | 0 tmva/sofie/test/CMakeLists.txt | 3 --- 5 files changed, 4 insertions(+), 3 deletions(-) rename tmva/{sofie => pymva}/test/generatePyTorchModelClassification.py (100%) rename tmva/{sofie => pymva}/test/generatePyTorchModelMulticlass.py (100%) rename tmva/{sofie => pymva}/test/generatePyTorchModelRegression.py (100%) diff --git a/tmva/pymva/test/CMakeLists.txt b/tmva/pymva/test/CMakeLists.txt index fb5899fb0fd4f..52014fdd4e253 100644 --- a/tmva/pymva/test/CMakeLists.txt +++ b/tmva/pymva/test/CMakeLists.txt @@ -55,6 +55,10 @@ endif(ROOT_SKLEARN_FOUND) # Enable tests based on available python modules if(ROOT_TORCH_FOUND) + configure_file(generatePyTorchModelClassification.py generatePyTorchModelClassification.py COPYONLY) + configure_file(generatePyTorchModelMulticlass.py generatePyTorchModelMulticlass.py COPYONLY) + configure_file(generatePyTorchModelRegression.py generatePyTorchModelRegression.py COPYONLY) + # Test PyTorch: Binary classification if (ROOT_SKLEARN_FOUND) diff --git a/tmva/sofie/test/generatePyTorchModelClassification.py b/tmva/pymva/test/generatePyTorchModelClassification.py similarity index 100% rename from tmva/sofie/test/generatePyTorchModelClassification.py rename to tmva/pymva/test/generatePyTorchModelClassification.py diff --git a/tmva/sofie/test/generatePyTorchModelMulticlass.py b/tmva/pymva/test/generatePyTorchModelMulticlass.py similarity index 100% rename from tmva/sofie/test/generatePyTorchModelMulticlass.py rename to tmva/pymva/test/generatePyTorchModelMulticlass.py diff --git a/tmva/sofie/test/generatePyTorchModelRegression.py b/tmva/pymva/test/generatePyTorchModelRegression.py similarity index 100% rename from tmva/sofie/test/generatePyTorchModelRegression.py rename to tmva/pymva/test/generatePyTorchModelRegression.py diff --git a/tmva/sofie/test/CMakeLists.txt b/tmva/sofie/test/CMakeLists.txt index fc72127c14d84..2b4b558d3c1e6 100644 --- a/tmva/sofie/test/CMakeLists.txt +++ b/tmva/sofie/test/CMakeLists.txt @@ -145,9 +145,6 @@ endif() # Any features that link against libpython are disabled if built with tpython=OFF if (tpython AND ROOT_TORCH_FOUND AND ROOT_ONNX_FOUND AND BLAS_FOUND AND NOT broken_onnx) - configure_file(generatePyTorchModelClassification.py generatePyTorchModelClassification.py COPYONLY) - configure_file(generatePyTorchModelMulticlass.py generatePyTorchModelMulticlass.py COPYONLY) - configure_file(generatePyTorchModelRegression.py generatePyTorchModelRegression.py COPYONLY) configure_file(generatePyTorchModels.py generatePyTorchModels.py COPYONLY) # Test RModelParser_PyTorch From c8ae7232375a4e9db765f583db9e118c87663b32 Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Sun, 30 Nov 2025 18:49:31 +0100 Subject: [PATCH 2/6] [tmva][sofie] Check `tmva-sofie` build option for SOFIE tutorials veto Follows up on 178a9f9c9f7b, where the SOFIE Keras and PyTorch parsers were moved from `tmva/pymva` 
to `tmva/sofie_parsers` and are now conditional on the `tmva-sofie`
configuration flag instead of `tmva-pymva`. The corresponding configuration
checks in the tutorials' CMakeLists.txt were not updated accordingly.
---
 tutorials/CMakeLists.txt | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

diff --git a/tutorials/CMakeLists.txt b/tutorials/CMakeLists.txt
index 4320a5104be84..75135f451a82c 100644
--- a/tutorials/CMakeLists.txt
+++ b/tutorials/CMakeLists.txt
@@ -350,18 +350,17 @@ else()
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.C)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RSofieReader.C)
   endif()
-  if (NOT tmva-pymva OR NOT ROOT_KERAS_FOUND)
+  if (NOT tmva-sofie OR NOT ROOT_KERAS_FOUND)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Keras.C)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Keras_HiggsModel.C)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.C)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame_JIT.C)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RSofieReader.C)
   endif()
-  if (NOT tmva-pymva OR NOT ROOT_TORCH_FOUND)
+  if (NOT tmva-sofie OR NOT ROOT_TORCH_FOUND)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_PyTorch.C)
   endif()
-  # The following tutorials use PyMVA functionality
-  if (NOT tmva-pymva)
+  if (NOT tmva-sofie)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.py)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Models.py)
     list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Inference.py)

From 1d9d73c94e1c19c53e44d2741e95f0309052cefb Mon Sep 17 00:00:00 2001
From: Jonas Rembser
Date: Sun, 30 Nov 2025 19:37:14 +0100
Subject: [PATCH 3/6] [tmva][sofie] Don't call PyMVA init functions in SOFIE tutorials

There is no need for that, and they are not available if ROOT was built with
`tmva-pymva=OFF`.
--- tutorials/machine_learning/TMVA_SOFIE_Inference.py | 3 --- tutorials/machine_learning/TMVA_SOFIE_Keras.C | 3 +-- tutorials/machine_learning/TMVA_SOFIE_Models.py | 2 -- tutorials/machine_learning/TMVA_SOFIE_PyTorch.C | 3 +-- tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py | 2 -- tutorials/machine_learning/TMVA_SOFIE_RDataFrame_JIT.C | 2 -- 6 files changed, 2 insertions(+), 13 deletions(-) diff --git a/tutorials/machine_learning/TMVA_SOFIE_Inference.py b/tutorials/machine_learning/TMVA_SOFIE_Inference.py index ebcdf8199c312..6f04974519af1 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_Inference.py +++ b/tutorials/machine_learning/TMVA_SOFIE_Inference.py @@ -18,9 +18,6 @@ import numpy as np -ROOT.TMVA.PyMethodBase.PyInitialize() - - # check if the input file exists modelFile = "Higgs_trained_model.h5" if (ROOT.gSystem.AccessPathName(modelFile)) : diff --git a/tutorials/machine_learning/TMVA_SOFIE_Keras.C b/tutorials/machine_learning/TMVA_SOFIE_Keras.C index 651e2ed35c50d..886414d9dc277 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_Keras.C +++ b/tutorials/machine_learning/TMVA_SOFIE_Keras.C @@ -40,13 +40,12 @@ model.save('KerasModel.h5')\n"; void TMVA_SOFIE_Keras(const char * modelFile = nullptr, bool printModelInfo = true){ //Running the Python script to generate Keras .h5 file - TMVA::PyMethodBase::PyInitialize(); if (modelFile == nullptr) { TMacro m; m.AddLine(pythonSrc); m.SaveSource("make_keras_model.py"); - gSystem->Exec(TMVA::Python_Executable() + " make_keras_model.py"); + gSystem->Exec("python3 make_keras_model.py"); modelFile = "KerasModel.h5"; } diff --git a/tutorials/machine_learning/TMVA_SOFIE_Models.py b/tutorials/machine_learning/TMVA_SOFIE_Models.py index 469e22940c77c..3ef4bffa434f5 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_Models.py +++ b/tutorials/machine_learning/TMVA_SOFIE_Models.py @@ -16,8 +16,6 @@ import ROOT from os.path import exists -ROOT.TMVA.PyMethodBase.PyInitialize() - ## generate and train Keras models with different architectures diff --git a/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C b/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C index dbf1ca5dd1a34..daa53378255b3 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C +++ b/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C @@ -42,12 +42,11 @@ torch.jit.save(m,'PyTorchModel.pt')\n"; void TMVA_SOFIE_PyTorch(){ //Running the Python script to generate PyTorch .pt file - TMVA::PyMethodBase::PyInitialize(); TMacro m; m.AddLine(pythonSrc); m.SaveSource("make_pytorch_model.py"); - gSystem->Exec(TMVA::Python_Executable() + " make_pytorch_model.py"); + gSystem->Exec("python3 make_pytorch_model.py"); //Parsing a PyTorch model requires the shape and data-type of input tensor //Data-type of input tensor defaults to Float if not specified diff --git a/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py b/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py index 6185684f31b66..241ddb9ecd93b 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py +++ b/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py @@ -13,8 +13,6 @@ import ROOT from os.path import exists -ROOT.TMVA.PyMethodBase.PyInitialize() - # check if the input file exists modelFile = "Higgs_trained_model.h5" diff --git a/tutorials/machine_learning/TMVA_SOFIE_RDataFrame_JIT.C b/tutorials/machine_learning/TMVA_SOFIE_RDataFrame_JIT.C index 0969f5f33b4ea..135a52ff73c04 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_RDataFrame_JIT.C +++ b/tutorials/machine_learning/TMVA_SOFIE_RDataFrame_JIT.C @@ -40,8 +40,6 @@ void 
CompileModelForRDF(const std::string & headerModelFile, unsigned int ninput void TMVA_SOFIE_RDataFrame_JIT(std::string modelFile = "Higgs_trained_model.h5"){ - TMVA::PyMethodBase::PyInitialize(); - // check if the input file exists if (gSystem->AccessPathName(modelFile.c_str())) { Info("TMVA_SOFIE_RDataFrame","You need to run TMVA_Higgs_Classification.C to generate the Keras trained model"); From b071362b08d44a6f4c3130466873ffdbf86700cc Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Mon, 1 Dec 2025 10:58:49 +0100 Subject: [PATCH 4/6] [tmva][sofie] Apply clang-format to tutorials --- tutorials/machine_learning/TMVA_SOFIE_Keras.C | 16 ++--- .../machine_learning/TMVA_SOFIE_PyTorch.C | 61 ++++++++++--------- 2 files changed, 39 insertions(+), 38 deletions(-) diff --git a/tutorials/machine_learning/TMVA_SOFIE_Keras.C b/tutorials/machine_learning/TMVA_SOFIE_Keras.C index 886414d9dc277..b8a4bf5366c71 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_Keras.C +++ b/tutorials/machine_learning/TMVA_SOFIE_Keras.C @@ -39,15 +39,15 @@ model.save('KerasModel.h5')\n"; void TMVA_SOFIE_Keras(const char * modelFile = nullptr, bool printModelInfo = true){ - //Running the Python script to generate Keras .h5 file + // Running the Python script to generate Keras .h5 file - if (modelFile == nullptr) { - TMacro m; - m.AddLine(pythonSrc); - m.SaveSource("make_keras_model.py"); - gSystem->Exec("python3 make_keras_model.py"); - modelFile = "KerasModel.h5"; - } + if (modelFile == nullptr) { + TMacro m; + m.AddLine(pythonSrc); + m.SaveSource("make_keras_model.py"); + gSystem->Exec("python3 make_keras_model.py"); + modelFile = "KerasModel.h5"; + } //Parsing the saved Keras .h5 file into RModel object SOFIE::RModel model = SOFIE::PyKeras::Parse(modelFile); diff --git a/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C b/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C index daa53378255b3..1eb5ae9d74b80 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C +++ b/tutorials/machine_learning/TMVA_SOFIE_PyTorch.C @@ -41,44 +41,45 @@ torch.jit.save(m,'PyTorchModel.pt')\n"; void TMVA_SOFIE_PyTorch(){ - //Running the Python script to generate PyTorch .pt file + // Running the Python script to generate PyTorch .pt file - TMacro m; - m.AddLine(pythonSrc); - m.SaveSource("make_pytorch_model.py"); - gSystem->Exec("python3 make_pytorch_model.py"); + TMacro m; + m.AddLine(pythonSrc); + m.SaveSource("make_pytorch_model.py"); + gSystem->Exec("python3 make_pytorch_model.py"); - //Parsing a PyTorch model requires the shape and data-type of input tensor - //Data-type of input tensor defaults to Float if not specified - std::vector inputTensorShapeSequential{2,32}; - std::vector> inputShapesSequential{inputTensorShapeSequential}; + // Parsing a PyTorch model requires the shape and data-type of input tensor + // Data-type of input tensor defaults to Float if not specified + std::vector inputTensorShapeSequential{2, 32}; + std::vector> inputShapesSequential{inputTensorShapeSequential}; - //Parsing the saved PyTorch .pt file into RModel object - SOFIE::RModel model = SOFIE::PyTorch::Parse("PyTorchModel.pt",inputShapesSequential); + // Parsing the saved PyTorch .pt file into RModel object + SOFIE::RModel model = SOFIE::PyTorch::Parse("PyTorchModel.pt", inputShapesSequential); - //Generating inference code - model.Generate(); - model.OutputGenerated("PyTorchModel.hxx"); + // Generating inference code + model.Generate(); + model.OutputGenerated("PyTorchModel.hxx"); - //Printing required input tensors - std::cout<<"\n\n"; - 
model.PrintRequiredInputTensors(); + // Printing required input tensors + std::cout << "\n\n"; + model.PrintRequiredInputTensors(); - //Printing initialized tensors (weights) - std::cout<<"\n\n"; - model.PrintInitializedTensors(); + // Printing initialized tensors (weights) + std::cout << "\n\n"; + model.PrintInitializedTensors(); - //Printing intermediate tensors - std::cout<<"\n\n"; - model.PrintIntermediateTensors(); + // Printing intermediate tensors + std::cout << "\n\n"; + model.PrintIntermediateTensors(); - //Checking if tensor already exist in model - std::cout<<"\n\nTensor \"0weight\" already exist: "< tensorShape = model.GetTensorShape("0weight"); - std::cout<<"Shape of tensor \"0weight\": "; - for(auto& it:tensorShape){ - std::cout< tensorShape = model.GetTensorShape("0weight"); + std::cout << "Shape of tensor \"0weight\": "; + for (auto &it : tensorShape) { + std::cout << it << ","; + } std::cout<<"\n\nData type of tensor \"0weight\": "; SOFIE::ETensorType tensorType = model.GetTensorType("0weight"); std::cout< Date: Sun, 30 Nov 2025 20:16:49 +0100 Subject: [PATCH 5/6] [tmva][sofie] Make SOFIE Keras tutorials dependent on Keras version The same was already done for the Keras unit tests when PyMVA and SOFIE were disentangled, but the same thing also has to be done for the tutorials. --- tutorials/CMakeLists.txt | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/tutorials/CMakeLists.txt b/tutorials/CMakeLists.txt index 75135f451a82c..9d0b23f214f7c 100644 --- a/tutorials/CMakeLists.txt +++ b/tutorials/CMakeLists.txt @@ -345,24 +345,43 @@ else() ROOT_FIND_PYTHON_MODULE(keras) ROOT_FIND_PYTHON_MODULE(sonnet) ROOT_FIND_PYTHON_MODULE(graph_nets) + + # Check if we support the installed Keras version. Otherwise, veto SOFIE + # Keras tutorials. This mirrors the logic in tmva/sofie/test/CMakeLists.txt. + # TODO: make sure we also support the newest Keras + set(unsupported_keras_version "3.5.0") + if (NOT DEFINED ROOT_KERAS_VERSION) + message(WARNING "Keras found, but version unknown — cannot verify compatibility.") + elseif (NOT ROOT_KERAS_VERSION VERSION_LESS ${unsupported_keras_version}) + message(WARNING "Keras version ${ROOT_KERAS_VERSION} is too new for the SOFIE Keras parser (only supports < ${unsupported_keras_version}). 
Corresponding tutorials will not be tested.") + set(keras_unsupported TRUE) + endif() + if (NOT BLAS_FOUND) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_GNN_Application.C) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.C) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RSofieReader.C) endif() - if (NOT tmva-sofie OR NOT ROOT_KERAS_FOUND) + if (NOT tmva-sofie OR NOT ROOT_KERAS_FOUND OR keras_unsupported) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Keras.C) + list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Models.py) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Keras_HiggsModel.C) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.C) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame_JIT.C) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RSofieReader.C) endif() + if (NOT tmva-pymva) + # These SOFIE tutorials take models trained via PyMVA-PyKeras as input + list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Keras_HiggsModel.C) + list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.C) + list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.py) + list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RSofieReader.C) + endif() if (NOT tmva-sofie OR NOT ROOT_TORCH_FOUND) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_PyTorch.C) endif() if (NOT tmva-sofie) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_RDataFrame.py) - list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Models.py) list(APPEND tmva_veto machine_learning/TMVA_SOFIE_Inference.py) endif() #veto this tutorial since it is added directly From 76b0ed3f6b13889b682cb6f43740ed17f13f616f Mon Sep 17 00:00:00 2001 From: Jonas Rembser Date: Mon, 1 Dec 2025 10:27:08 +0100 Subject: [PATCH 6/6] [tmva] Implement linter suggestions in Python files --- .../generatePyTorchModelClassification.py | 27 ++++++++----------- .../test/generatePyTorchModelMulticlass.py | 27 ++++++++----------- .../test/generatePyTorchModelRegression.py | 25 ++++++++--------- .../machine_learning/TMVA_SOFIE_Inference.py | 3 +-- .../machine_learning/TMVA_SOFIE_Models.py | 14 +++++----- .../machine_learning/TMVA_SOFIE_RDataFrame.py | 6 ++--- 6 files changed, 43 insertions(+), 59 deletions(-) diff --git a/tmva/pymva/test/generatePyTorchModelClassification.py b/tmva/pymva/test/generatePyTorchModelClassification.py index c19cfe1bf3c00..42b6e11b43667 100644 --- a/tmva/pymva/test/generatePyTorchModelClassification.py +++ b/tmva/pymva/test/generatePyTorchModelClassification.py @@ -2,11 +2,7 @@ from torch import nn # Define model -model = nn.Sequential( - nn.Linear(4, 64), - nn.ReLU(), - nn.Linear(64, 2), - nn.Softmax(dim=1)) +model = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 2), nn.Softmax(dim=1)) # Construct loss function and Optimizer. 
criterion = torch.nn.MSELoss() @@ -33,8 +29,8 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit # print train statistics running_train_loss += train_loss.item() - if i % 32 == 31: # print every 32 mini-batches - print(f"[{epoch+1}, {i+1}] train loss: {running_train_loss / 32 :.3f}") + if i % 32 == 31: # print every 32 mini-batches + print(f"[{epoch + 1}, {i + 1}] train loss: {running_train_loss / 32:.3f}") running_train_loss = 0.0 if schedule: @@ -51,15 +47,15 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit curr_val = running_val_loss / len(val_loader) if save_best: - if best_val==None: - best_val = curr_val - best_val = save_best(model, curr_val, best_val) + if best_val is None: + best_val = curr_val + best_val = save_best(model, curr_val, best_val) # print val statistics per epoch - print(f"[{epoch+1}] val loss: {curr_val :.3f}") + print(f"[{epoch + 1}] val loss: {curr_val:.3f}") running_val_loss = 0.0 - print(f"Finished Training on {epoch+1} Epochs!") + print(f"Finished Training on {epoch + 1} Epochs!") return model @@ -67,7 +63,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit def predict(model, test_X, batch_size=32): # Set to eval mode model.eval() - + test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X)) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) @@ -78,7 +74,7 @@ def predict(model, test_X, batch_size=32): outputs = model(X) predictions.append(outputs) preds = torch.cat(predictions) - + return preds.numpy() @@ -86,5 +82,4 @@ def predict(model, test_X, batch_size=32): # Store model to file m = torch.jit.script(model) -torch.jit.save(m,"PyTorchModelClassification.pt") - +torch.jit.save(m, "PyTorchModelClassification.pt") diff --git a/tmva/pymva/test/generatePyTorchModelMulticlass.py b/tmva/pymva/test/generatePyTorchModelMulticlass.py index 88b787f48c132..a6fee1f51df4f 100644 --- a/tmva/pymva/test/generatePyTorchModelMulticlass.py +++ b/tmva/pymva/test/generatePyTorchModelMulticlass.py @@ -2,11 +2,7 @@ from torch import nn # Define model -model = nn.Sequential( - nn.Linear(4, 64), - nn.ReLU(), - nn.Linear(64, 4), - nn.Softmax(dim=1)) +model = nn.Sequential(nn.Linear(4, 64), nn.ReLU(), nn.Linear(64, 4), nn.Softmax(dim=1)) # Construct loss function and Optimizer. 
criterion = nn.CrossEntropyLoss() @@ -34,8 +30,8 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit # print train statistics running_train_loss += train_loss.item() - if i % 4 == 3: # print every 4 mini-batches - print(f"[{epoch+1}, {i+1}] train loss: {running_train_loss / 4 :.3f}") + if i % 4 == 3: # print every 4 mini-batches + print(f"[{epoch + 1}, {i + 1}] train loss: {running_train_loss / 4:.3f}") running_train_loss = 0.0 if schedule: @@ -53,15 +49,15 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit curr_val = running_val_loss / len(val_loader) if save_best: - if best_val==None: - best_val = curr_val - best_val = save_best(model, curr_val, best_val) + if best_val is None: + best_val = curr_val + best_val = save_best(model, curr_val, best_val) # print val statistics per epoch - print(f"[{epoch+1}] val loss: {curr_val :.3f}") + print(f"[{epoch + 1}] val loss: {curr_val:.3f}") running_val_loss = 0.0 - print(f"Finished Training on {epoch+1} Epochs!") + print(f"Finished Training on {epoch + 1} Epochs!") return model @@ -69,7 +65,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit def predict(model, test_X, batch_size=32): # Set to eval mode model.eval() - + test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X)) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) @@ -80,7 +76,7 @@ def predict(model, test_X, batch_size=32): outputs = model(X) predictions.append(outputs) preds = torch.cat(predictions) - + return preds.numpy() @@ -88,5 +84,4 @@ def predict(model, test_X, batch_size=32): # Store model to file m = torch.jit.script(model) -torch.jit.save(m,"PyTorchModelMulticlass.pt") - +torch.jit.save(m, "PyTorchModelMulticlass.pt") diff --git a/tmva/pymva/test/generatePyTorchModelRegression.py b/tmva/pymva/test/generatePyTorchModelRegression.py index 2622d43fc20b0..a7f529914833d 100644 --- a/tmva/pymva/test/generatePyTorchModelRegression.py +++ b/tmva/pymva/test/generatePyTorchModelRegression.py @@ -2,10 +2,7 @@ from torch import nn # Define model -model = nn.Sequential( - nn.Linear(2, 64), - nn.Tanh(), - nn.Linear(64, 1)) +model = nn.Sequential(nn.Linear(2, 64), nn.Tanh(), nn.Linear(64, 1)) # Construct loss function and Optimizer. 
criterion = torch.nn.MSELoss() @@ -32,8 +29,8 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit # print train statistics running_train_loss += train_loss.item() - if i % 32 == 31: # print every 32 mini-batches - print(f"[{epoch+1}, {i+1}] train loss: {running_train_loss / 32 :.3f}") + if i % 32 == 31: # print every 32 mini-batches + print(f"[{epoch + 1}, {i + 1}] train loss: {running_train_loss / 32:.3f}") running_train_loss = 0.0 if schedule: @@ -50,15 +47,15 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit curr_val = running_val_loss / len(val_loader) if save_best: - if best_val==None: - best_val = curr_val - best_val = save_best(model, curr_val, best_val) + if best_val is None: + best_val = curr_val + best_val = save_best(model, curr_val, best_val) # print val statistics per epoch - print(f"[{epoch+1}] val loss: {curr_val :.3f}") + print(f"[{epoch + 1}] val loss: {curr_val:.3f}") running_val_loss = 0.0 - print(f"Finished Training on {epoch+1} Epochs!") + print(f"Finished Training on {epoch + 1} Epochs!") return model @@ -66,7 +63,7 @@ def fit(model, train_loader, val_loader, num_epochs, batch_size, optimizer, crit def predict(model, test_X, batch_size=32): # Set to eval mode model.eval() - + test_dataset = torch.utils.data.TensorDataset(torch.Tensor(test_X)) test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False) @@ -77,7 +74,7 @@ def predict(model, test_X, batch_size=32): outputs = model(X) predictions.append(outputs) preds = torch.cat(predictions) - + return preds.numpy() @@ -85,4 +82,4 @@ def predict(model, test_X, batch_size=32): # Store model to file m = torch.jit.script(model) -torch.jit.save(m,"PyTorchModelRegression.pt") \ No newline at end of file +torch.jit.save(m, "PyTorchModelRegression.pt") diff --git a/tutorials/machine_learning/TMVA_SOFIE_Inference.py b/tutorials/machine_learning/TMVA_SOFIE_Inference.py index 6f04974519af1..1b150bdbaa4ca 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_Inference.py +++ b/tutorials/machine_learning/TMVA_SOFIE_Inference.py @@ -14,9 +14,8 @@ ### \macro_output ### \author Lorenzo Moneta -import ROOT import numpy as np - +import ROOT # check if the input file exists modelFile = "Higgs_trained_model.h5" diff --git a/tutorials/machine_learning/TMVA_SOFIE_Models.py b/tutorials/machine_learning/TMVA_SOFIE_Models.py index 3ef4bffa434f5..25d1931870bfe 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_Models.py +++ b/tutorials/machine_learning/TMVA_SOFIE_Models.py @@ -13,18 +13,17 @@ ### \macro_output ### \author Lorenzo Moneta -import ROOT -from os.path import exists - - -## generate and train Keras models with different architectures +import os import numpy as np -from tensorflow.keras.models import Sequential +import ROOT +from sklearn.model_selection import train_test_split from tensorflow.keras.layers import Dense +from tensorflow.keras.models import Sequential from tensorflow.keras.optimizers import Adam -from sklearn.model_selection import train_test_split +## generate and train Keras models with different architectures + def CreateModel(nlayers = 4, nunits = 64): model = Sequential() @@ -101,7 +100,6 @@ def GenerateModelCode(modelFile, generatedHeaderFile): generatedHeaderFile = "Higgs_Model.hxx" #need to remove existing header file since we are appending on same one -import os if (os.path.exists(generatedHeaderFile)): weightFile = "Higgs_Model.root" print("removing existing files", generatedHeaderFile,weightFile) diff --git 
a/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py b/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py index 241ddb9ecd93b..0b815032be55a 100644 --- a/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py +++ b/tutorials/machine_learning/TMVA_SOFIE_RDataFrame.py @@ -10,13 +10,13 @@ ### \macro_output ### \author Lorenzo Moneta -import ROOT from os.path import exists +import ROOT # check if the input file exists modelFile = "Higgs_trained_model.h5" -modelName = "Higgs_trained_model"; +modelName = "Higgs_trained_model" if not exists(modelFile): raise FileNotFoundError("You need to run TMVA_Higgs_Classification.C to generate the Keras trained model") @@ -43,7 +43,7 @@ h2 = df2.Define("DNN_Value", "sofie_functor(rdfslot_,m_jj, m_jjj, m_lv, m_jlv, m_bb, m_wbb, m_wwbb)").Histo1D(("h_bkg", "", 100, 0, 1),"DNN_Value") # run over the input data once, combining both RDataFrame graphs. -ROOT.RDF.RunGraphs([h1, h2]); +ROOT.RDF.RunGraphs([h1, h2]) print("Number of signal entries",h1.GetEntries()) print("Number of background entries",h2.GetEntries())