Skip to content

Commit

Permalink
add example loading an entire RNTuple into a LLAMA view
Browse files Browse the repository at this point in the history
  • Loading branch information
bernhardmgruber committed Jun 30, 2021
1 parent e1dfd94 commit 7b6f43c
Show file tree
Hide file tree
Showing 3 changed files with 70 additions and 0 deletions.
6 changes: 6 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -69,6 +69,12 @@ if (LLAMA_BUILD_EXAMPLES)
message(WARNING "Could not find alpaka. Alpaka examples are disabled.")
endif()

# ROOT examples: only built when a ROOT installation is available.
# ROOT is searched QUIET here so a missing installation does not warn/fail;
# the example's own CMakeLists.txt does a REQUIRED find for standalone builds.
find_package(ROOT QUIET)
if (ROOT_FOUND)
add_subdirectory("examples/hep_rntuple")
endif()

# CUDA examples
include(CheckLanguage)
check_language(CUDA)
Expand Down
11 changes: 11 additions & 0 deletions examples/hep_rntuple/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
cmake_minimum_required(VERSION 3.15)
project(llama-hep_rntuple CXX)

# The example uses C++17 features; require the standard instead of merely
# requesting it, so CMake errors out rather than silently falling back to an
# older standard on compilers without C++17 support.
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

find_package(ROOT REQUIRED)
# When built inside the llama source tree the llama::llama target already
# exists; for a standalone build, locate an installed llama package.
if (NOT TARGET llama::llama)
	find_package(llama REQUIRED)
endif()
add_executable(${PROJECT_NAME} hep_rntuple.cpp)
target_link_libraries(${PROJECT_NAME} PRIVATE ROOT::Hist ROOT::Graf ROOT::Gpad ROOT::ROOTNTuple llama::llama)
53 changes: 53 additions & 0 deletions examples/hep_rntuple/hep_rntuple.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
// This example uses a non-public CMS NanoAOD file called: ttjet_13tev_june2019_lzma.
// Please contact us if you need it.

#include "../common/ttjet_13tev_june2019.hpp"

#include <RConfigure.h>
#define R__HAS_STD_STRING_VIEW
#include <ROOT/RNTuple.hxx>
#include <ROOT/RNTupleDS.hxx>
#include <ROOT/RNTupleModel.hxx>
#include <ROOT/RNTupleOptions.hxx>
#include <ROOT/RNTupleView.hxx>
#include <chrono>
#include <llama/DumpMapping.hpp>
#include <llama/llama.hpp>

/// Loads every entry of an RNTuple file into a LLAMA SoA view and reports
/// allocation, memory-footprint and copy timings.
/// @param argv expects exactly one argument: the path to the RNTuple file.
/// @return 0 on success, 1 when the input file argument is missing.
int main(int argc, const char* argv[])
{
    if (argc != 2)
    {
        fmt::print("Please specify input file!\n");
        return 1;
    }

    using namespace std::chrono;
    using namespace ROOT::Experimental;

    // Open with an empty model; individual columns are accessed on demand
    // via typed views inside the copy loop below.
    auto ntuple = RNTupleReader::Open(RNTupleModel::Create(), "NTuple", argv[1]);
    const auto n = ntuple->GetNEntries();

    // Allocate a LLAMA view using an SoA mapping over the Event record
    // dimension, sized to hold all entries of the ntuple.
    auto start = steady_clock::now();
    auto view = llama::allocView(llama::mapping::SoA<llama::ArrayDims<1>, Event, true>{llama::ArrayDims{n}});
    fmt::print("Alloc LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count());

    // Sum the sizes of all blobs to report the view's total memory footprint.
    std::size_t totalSize = 0;
    for (auto i = 0u; i < view.mapping.blobCount; i++)
        totalSize += view.mapping.blobSize(i);
    fmt::print("Total LLAMA view memory: {}MiB in {} blobs\n", totalSize / 1024 / 1024, view.mapping.blobCount);

    // Copy the ntuple into the view, one leaf field (column) at a time:
    // for each leaf of the Event record dimension, open a typed RNTuple view
    // on the column of the same name and copy all n values.
    start = steady_clock::now();
    llama::forEachLeaf<Event>(
        [&](auto coord)
        {
            using Name = llama::GetTag<Event, decltype(coord)>;
            using Type = llama::GetType<Event, decltype(coord)>;
            auto column = ntuple->GetView<Type>(llama::structName<Name>());
            for (std::size_t i = 0; i < n; i++)
                view(i)(coord) = column(i);
        });
    fmt::print("Copy RNTuple -> LLAMA view: {}ms\n", duration_cast<milliseconds>(steady_clock::now() - start).count());
    // NOTE(review): the original ended with a dead `start = steady_clock::now();`
    // (leftover from a truncated timing section) — removed.
}

0 comments on commit 7b6f43c

Please sign in to comment.