Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix codechecker warnings #5272

Merged
merged 2 commits on Jan 25, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
Expand Up @@ -36,7 +36,7 @@ class PrimaryVertexContextNV final : public PrimaryVertexContext
{
public:
PrimaryVertexContextNV() = default;
virtual ~PrimaryVertexContextNV() = default;
~PrimaryVertexContextNV() override;

void initialise(const MemoryParameters& memParam, const TrackingParameters& trkParam,
const std::vector<std::vector<Cluster>>& cl, const std::array<float, 3>& pv, const int iteration) override;
Expand All @@ -62,6 +62,8 @@ class PrimaryVertexContextNV final : public PrimaryVertexContext
std::array<gpu::Vector<Cell>, constants::its2::CellsPerRoad - 1> mTempCellArray;
};

inline PrimaryVertexContextNV::~PrimaryVertexContextNV() = default;

inline gpu::DeviceStoreNV& PrimaryVertexContextNV::getDeviceContext()
{
return *mGPUContextDevicePointer;
Expand Down
Expand Up @@ -47,7 +47,7 @@ class VertexerTraitsGPU : public VertexerTraits
~VertexerTraitsGPU() override;
#else
VertexerTraitsGPU();
~VertexerTraitsGPU() = default;
~VertexerTraitsGPU() override;
#endif
void initialise(ROframe*) override;
void computeTracklets() override;
Expand Down
2 changes: 2 additions & 0 deletions Detectors/ITSMFT/ITS/tracking/cuda/src/VertexerTraitsGPU.cu
Expand Up @@ -93,6 +93,8 @@ VertexerTraitsGPU::VertexerTraitsGPU()

#endif

VertexerTraitsGPU::~VertexerTraitsGPU() = default;

void VertexerTraitsGPU::initialise(ROframe* event)
{
reset();
Expand Down
Expand Up @@ -82,8 +82,8 @@ UniquePointer<T>::UniquePointer(const T& ref)
{
try {

Utils::HostHIP::gpuMalloc(reinterpret_cast<void**>(&mDevicePointer), sizeof(T));
Utils::HostHIP::gpuMemcpyHostToDevice(mDevicePointer, &ref, sizeof(T));
utils::host_hip::gpuMalloc(reinterpret_cast<void**>(&mDevicePointer), sizeof(T));
utils::host_hip::gpuMemcpyHostToDevice(mDevicePointer, &ref, sizeof(T));

} catch (...) {

Expand Down Expand Up @@ -119,7 +119,7 @@ void UniquePointer<T>::destroy()
{
if (mDevicePointer != nullptr) {

Utils::HostHIP::gpuFree(mDevicePointer);
utils::host_hip::gpuFree(mDevicePointer);
}
}

Expand Down
Expand Up @@ -26,10 +26,10 @@ namespace its
namespace gpu
{

namespace Utils
namespace utils
{

namespace HostHIP
namespace host_hip
{

#ifdef __HIPCC__
Expand All @@ -50,15 +50,15 @@ void gpuMemcpyHostToDeviceAsync(void*, const void*, int, hipStream_t&);
void gpuMemcpyDeviceToHost(void*, const void*, int);
// void gpuStartProfiler();
// void gpuStopProfiler();
} // namespace Host
} // namespace host_hip
//
namespace DeviceHIP
namespace device_hip
{
GPUd() int getLaneIndex();
GPUd() int shareToWarp(const int, const int);
GPUd() int gpuAtomicAdd(int*, const int);
} // namespace Device
} // namespace Utils
} // namespace device_hip
} // namespace utils
} // namespace gpu
} // namespace its
} // namespace o2
Expand Down
Expand Up @@ -100,17 +100,17 @@ VectorHIP<T>::VectorHIP(const T* const source, const int size, const int initial
if (size > 0) {
try {

Utils::HostHIP::gpuMalloc(reinterpret_cast<void**>(&mArrayPointer), size * sizeof(T));
Utils::HostHIP::gpuMalloc(reinterpret_cast<void**>(&mDeviceSize), sizeof(int));
utils::host_hip::gpuMalloc(reinterpret_cast<void**>(&mArrayPointer), size * sizeof(T));
utils::host_hip::gpuMalloc(reinterpret_cast<void**>(&mDeviceSize), sizeof(int));

if (source != nullptr) {

Utils::HostHIP::gpuMemcpyHostToDevice(mArrayPointer, source, size * sizeof(T));
Utils::HostHIP::gpuMemcpyHostToDevice(mDeviceSize, &size, sizeof(int));
utils::host_hip::gpuMemcpyHostToDevice(mArrayPointer, source, size * sizeof(T));
utils::host_hip::gpuMemcpyHostToDevice(mDeviceSize, &size, sizeof(int));

} else {

Utils::HostHIP::gpuMemcpyHostToDevice(mDeviceSize, &initialSize, sizeof(int));
utils::host_hip::gpuMemcpyHostToDevice(mDeviceSize, &initialSize, sizeof(int));
}

} catch (...) {
Expand Down Expand Up @@ -179,15 +179,15 @@ template <typename T>
int VectorHIP<T>::getSizeFromDevice() const
{
int size;
Utils::HostHIP::gpuMemcpyDeviceToHost(&size, mDeviceSize, sizeof(int));
utils::host_hip::gpuMemcpyDeviceToHost(&size, mDeviceSize, sizeof(int));

return size;
}

template <typename T>
void VectorHIP<T>::resize(const int size)
{
Utils::HostHIP::gpuMemcpyHostToDevice(mDeviceSize, &size, sizeof(int));
utils::host_hip::gpuMemcpyHostToDevice(mDeviceSize, &size, sizeof(int));
}

template <typename T>
Expand All @@ -201,20 +201,20 @@ void VectorHIP<T>::reset(const T* const source, const int size, const int initia
{
if (size > mCapacity) {
if (mArrayPointer != nullptr) {
Utils::HostHIP::gpuFree(mArrayPointer);
utils::host_hip::gpuFree(mArrayPointer);
}

Utils::HostHIP::gpuMalloc(reinterpret_cast<void**>(&mArrayPointer), size * sizeof(T));
utils::host_hip::gpuMalloc(reinterpret_cast<void**>(&mArrayPointer), size * sizeof(T));
mCapacity = size;
}

if (source != nullptr) {

Utils::HostHIP::gpuMemcpyHostToDevice(mArrayPointer, source, size * sizeof(T));
Utils::HostHIP::gpuMemcpyHostToDevice(mDeviceSize, &size, sizeof(int));
utils::host_hip::gpuMemcpyHostToDevice(mArrayPointer, source, size * sizeof(T));
utils::host_hip::gpuMemcpyHostToDevice(mDeviceSize, &size, sizeof(int));

} else {
Utils::HostHIP::gpuMemcpyHostToDevice(mDeviceSize, &initialSize, sizeof(int));
utils::host_hip::gpuMemcpyHostToDevice(mDeviceSize, &initialSize, sizeof(int));
}
}

Expand All @@ -227,7 +227,7 @@ void VectorHIP<T>::copyIntoVector(std::vector<T>& destinationVector, const int s
try {

hostPrimitivePointer = static_cast<T*>(malloc(size * sizeof(T)));
Utils::HostHIP::gpuMemcpyDeviceToHost(hostPrimitivePointer, mArrayPointer, size * sizeof(T));
utils::host_hip::gpuMemcpyDeviceToHost(hostPrimitivePointer, mArrayPointer, size * sizeof(T));

destinationVector = std::move(std::vector<T>(hostPrimitivePointer, hostPrimitivePointer + size));

Expand All @@ -245,20 +245,20 @@ void VectorHIP<T>::copyIntoVector(std::vector<T>& destinationVector, const int s
template <typename T>
void VectorHIP<T>::copyIntoSizedVector(std::vector<T>& destinationVector)
{
Utils::HostHIP::gpuMemcpyDeviceToHost(destinationVector.data(), mArrayPointer, destinationVector.size() * sizeof(T));
utils::host_hip::gpuMemcpyDeviceToHost(destinationVector.data(), mArrayPointer, destinationVector.size() * sizeof(T));
}

template <typename T>
inline void VectorHIP<T>::destroy()
{
if (mArrayPointer != nullptr) {

Utils::HostHIP::gpuFree(mArrayPointer);
utils::host_hip::gpuFree(mArrayPointer);
}

if (mDeviceSize != nullptr) {

Utils::HostHIP::gpuFree(mDeviceSize);
utils::host_hip::gpuFree(mDeviceSize);
}
}

Expand Down Expand Up @@ -290,7 +290,7 @@ template <typename T>
T VectorHIP<T>::getElementFromDevice(const int index) const
{
T element;
Utils::HostHIP::gpuMemcpyDeviceToHost(&element, mArrayPointer + index, sizeof(T));
utils::host_hip::gpuMemcpyDeviceToHost(&element, mArrayPointer + index, sizeof(T));

return element;
}
Expand All @@ -304,7 +304,7 @@ GPUhd() int VectorHIP<T>::size() const
template <typename T>
GPUd() int VectorHIP<T>::extend(const int sizeIncrement) const
{
const int startIndex = Utils::DeviceHIP::gpuAtomicAdd(mDeviceSize, sizeIncrement);
const int startIndex = utils::device_hip::gpuAtomicAdd(mDeviceSize, sizeIncrement);
assert(size() <= mCapacity);

return startIndex;
Expand Down
Expand Up @@ -21,7 +21,6 @@
#include "ITStracking/VertexerTraits.h"
#include "ITStracking/Cluster.h"
#include "ITStracking/Constants.h"
// #include "ITStracking/Definitions.h"
#include "ITStracking/Tracklet.h"

#include "ITStrackingHIP/DeviceStoreVertexerHIP.h"
Expand Down
2 changes: 1 addition & 1 deletion Detectors/ITSMFT/ITS/tracking/hip/src/ContextHIP.hip.cxx
Expand Up @@ -43,7 +43,7 @@ namespace its
namespace gpu
{

using Utils::HostHIP::checkHIPError;
using utils::host_hip::checkHIPError;

ContextHIP::ContextHIP(bool dumpDevices)
{
Expand Down
Expand Up @@ -84,8 +84,8 @@ UniquePointer<DeviceStoreVertexerHIP> DeviceStoreVertexerHIP::initialise(const s
mIndexTables[0].reset(indexTables[0].data(), static_cast<int>(indexTables[0].size()));
mIndexTables[1].reset(indexTables[2].data(), static_cast<int>(indexTables[2].size()));

const dim3 threadsPerBlock{Utils::HostHIP::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{Utils::HostHIP::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};
const dim3 threadsPerBlock{utils::host_hip::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{utils::host_hip::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};

UniquePointer<DeviceStoreVertexerHIP> deviceStoreVertexerPtr{*this};

Expand Down
36 changes: 18 additions & 18 deletions Detectors/ITSMFT/ITS/tracking/hip/src/UtilsHIP.hip.cxx
Expand Up @@ -8,7 +8,7 @@
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
///
/// \file UtilsHIP.hip.cxx
/// \file utilsHIP.hip.cxx
/// \brief
///

Expand Down Expand Up @@ -58,7 +58,7 @@ namespace its
namespace gpu
{

void Utils::HostHIP::checkHIPError(const hipError_t error, const char* file, const int line)
void utils::host_hip::checkHIPError(const hipError_t error, const char* file, const int line)
{
if (error != hipSuccess) {
std::ostringstream errorString{};
Expand All @@ -68,18 +68,18 @@ void Utils::HostHIP::checkHIPError(const hipError_t error, const char* file, con
}
}

dim3 Utils::HostHIP::getBlockSize(const int colsNum)
dim3 utils::host_hip::getBlockSize(const int colsNum)
{
return getBlockSize(colsNum, 1);
}

dim3 Utils::HostHIP::getBlockSize(const int colsNum, const int rowsNum)
dim3 utils::host_hip::getBlockSize(const int colsNum, const int rowsNum)
{
const DeviceProperties& deviceProperties = ContextHIP::getInstance().getDeviceProperties();
return getBlockSize(colsNum, rowsNum, deviceProperties.streamProcessors / deviceProperties.maxBlocksPerSM);
}

dim3 Utils::HostHIP::getBlockSize(const int colsNum, const int rowsNum, const int maxThreadsPerBlock)
dim3 utils::host_hip::getBlockSize(const int colsNum, const int rowsNum, const int maxThreadsPerBlock)
{
const DeviceProperties& deviceProperties = ContextHIP::getInstance().getDeviceProperties();
int xThreads = std::max(std::min(colsNum, static_cast<int>(deviceProperties.maxThreadsDim.x)), 1);
Expand All @@ -98,71 +98,71 @@ dim3 Utils::HostHIP::getBlockSize(const int colsNum, const int rowsNum, const in
return dim3{static_cast<unsigned int>(xThreads), static_cast<unsigned int>(yThreads)};
}

dim3 Utils::HostHIP::getBlocksGrid(const dim3& threadsPerBlock, const int rowsNum)
dim3 utils::host_hip::getBlocksGrid(const dim3& threadsPerBlock, const int rowsNum)
{
return getBlocksGrid(threadsPerBlock, rowsNum, 1);
}

dim3 Utils::HostHIP::getBlocksGrid(const dim3& threadsPerBlock, const int rowsNum, const int colsNum)
dim3 utils::host_hip::getBlocksGrid(const dim3& threadsPerBlock, const int rowsNum, const int colsNum)
{
return dim3{1 + (rowsNum - 1) / threadsPerBlock.x, 1 + (colsNum - 1) / threadsPerBlock.y};
}

void Utils::HostHIP::gpuMalloc(void** p, const int size)
void utils::host_hip::gpuMalloc(void** p, const int size)
{
checkHIPError(hipMalloc(p, size), __FILE__, __LINE__);
}

void Utils::HostHIP::gpuFree(void* p)
void utils::host_hip::gpuFree(void* p)
{
checkHIPError(hipFree(p), __FILE__, __LINE__);
}

void Utils::HostHIP::gpuMemset(void* p, int value, int size)
void utils::host_hip::gpuMemset(void* p, int value, int size)
{
checkHIPError(hipMemset(p, value, size), __FILE__, __LINE__);
}

void Utils::HostHIP::gpuMemcpyHostToDevice(void* dst, const void* src, int size)
void utils::host_hip::gpuMemcpyHostToDevice(void* dst, const void* src, int size)
{
checkHIPError(hipMemcpy(dst, src, size, hipMemcpyHostToDevice), __FILE__, __LINE__);
}

void Utils::HostHIP::gpuMemcpyHostToDeviceAsync(void* dst, const void* src, int size, hipStream_t& stream)
void utils::host_hip::gpuMemcpyHostToDeviceAsync(void* dst, const void* src, int size, hipStream_t& stream)
{
checkHIPError(hipMemcpyAsync(dst, src, size, hipMemcpyHostToDevice, stream), __FILE__, __LINE__);
}

void Utils::HostHIP::gpuMemcpyDeviceToHost(void* dst, const void* src, int size)
void utils::host_hip::gpuMemcpyDeviceToHost(void* dst, const void* src, int size)
{
checkHIPError(hipMemcpy(dst, src, size, hipMemcpyDeviceToHost), __FILE__, __LINE__);
}

// void Utils::HostHIP::gpuStartProfiler()
// void utils::host_hip::gpuStartProfiler()
// {
// checkHIPError(hipProfilerStart(), __FILE__, __LINE__);
// }

// void Utils::HostHIP::gpuStopProfiler()
// void utils::host_hip::gpuStopProfiler()
// {
// checkHIPError(hipProfilerStop(), __FILE__, __LINE__);
// }

GPUd() int Utils::DeviceHIP::getLaneIndex()
GPUd() int utils::device_hip::getLaneIndex()
{
uint32_t laneIndex;
asm volatile("mov.u32 %0, %%laneid;"
: "=r"(laneIndex));
return static_cast<int>(laneIndex);
}

// GPUd() int Utils::Device::shareToWarp(const int value, const int laneIndex)
// GPUd() int utils::Device::shareToWarp(const int value, const int laneIndex)
// {
// cooperative_groups::coalesced_group threadGroup = cooperative_groups::coalesced_threads();
// return threadGroup.shfl(value, laneIndex);
// }

// GPUd() int Utils::Device::gpuAtomicAdd(int* p, const int incrementSize)
// GPUd() int utils::Device::gpuAtomicAdd(int* p, const int incrementSize)
// {
// return atomicAdd(p, incrementSize);
// }
Expand Down
Expand Up @@ -344,8 +344,8 @@ void VertexerTraitsHIP::computeTracklets()
std::cout << "\t\tno clusters on layer 1. Returning.\n";
return;
}
const dim3 threadsPerBlock{gpu::Utils::HostHIP::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{gpu::Utils::HostHIP::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};
const dim3 threadsPerBlock{gpu::utils::host_hip::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{gpu::utils::host_hip::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};

hipLaunchKernelGGL((gpu::trackleterKernel), dim3(blocksGrid), dim3(threadsPerBlock), 0, 0,
getDeviceContextPtr(),
Expand Down Expand Up @@ -375,8 +375,8 @@ void VertexerTraitsHIP::computeTrackletMatching()
std::cout << "\t\tno clusters on layer 1. Returning.\n";
return;
}
const dim3 threadsPerBlock{gpu::Utils::HostHIP::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{gpu::Utils::HostHIP::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};
const dim3 threadsPerBlock{gpu::utils::host_hip::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{gpu::utils::host_hip::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};
size_t bufferSize = mStoreVertexerGPU.getConfig().tmpCUBBufferSize * sizeof(int);

hipLaunchKernelGGL((gpu::trackletSelectionKernel), dim3(blocksGrid), dim3(threadsPerBlock), 0, 0,
Expand Down Expand Up @@ -423,8 +423,8 @@ void VertexerTraitsHIP::computeVertices()
std::cout << "\t\tno clusters on layer 1. Returning.\n";
return;
}
const dim3 threadsPerBlock{gpu::Utils::HostHIP::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{gpu::Utils::HostHIP::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};
const dim3 threadsPerBlock{gpu::utils::host_hip::getBlockSize(mClusters[1].capacity())};
const dim3 blocksGrid{gpu::utils::host_hip::getBlocksGrid(threadsPerBlock, mClusters[1].capacity())};
size_t bufferSize = mStoreVertexerGPU.getConfig().tmpCUBBufferSize * sizeof(int);
int nLines = mStoreVertexerGPU.getNExclusiveFoundLines().getElementFromDevice(mClusters[1].size() - 1) + mStoreVertexerGPU.getNFoundLines().getElementFromDevice(mClusters[1].size() - 1);
int nCentroids{static_cast<int>(nLines * (nLines - 1) / 2)};
Expand Down