RPP Tensor Voxel Support - Add and Subtract on HOST and HIP #209

85 changes: 85 additions & 0 deletions include/rppt_tensor_arithmetic_operations.h
@@ -83,6 +83,91 @@ RppStatus rppt_fused_multiply_add_scalar_host(RppPtr_t srcPtr, RpptGenericDescPt
RppStatus rppt_fused_multiply_add_scalar_gpu(RppPtr_t srcPtr, RpptGenericDescPtr srcGenericDescPtr, RppPtr_t dstPtr, RpptGenericDescPtr dstGenericDescPtr, Rpp32f *mulTensor, Rpp32f *addTensor, RpptROI3DPtr roiGenericPtrSrc, RpptRoi3DType roiType, rppHandle_t rppHandle);
#endif // GPU_SUPPORT

/******************** add_scalar ********************/

/*! \brief add augmentation HOST
* \details This function performs the addition operation on a batch of 4D tensors.
* It adds the corresponding element of 'addTensor' to the source tensor and stores the result in the destination tensor.
* Support added for f32 -> f32 datatype.
* \param [in] srcPtr source tensor memory
* \param [in] srcGenericDescPtr source tensor descriptor
* \param [out] dstPtr destination tensor memory
* \param [in] dstGenericDescPtr destination tensor descriptor
* \param [in] addTensor values used for addition (1D tensor of batchSize Rpp32f values)
* \param [in] roiGenericPtrSrc ROI data for each image in the source tensor (tensor of batchSize RpptRoiGeneric values)
* \param [in] roiType ROI type used (RpptRoi3DType::XYZWHD or RpptRoi3DType::LTFRBB)
* \param [in] rppHandle Host-handle
* \returns RPP_SUCCESS <tt>\ref RppStatus</tt> on successful completion,
* RPP_ERROR otherwise.
* \ingroup group_tensor_arithmetic
*/
RppStatus rppt_add_scalar_host(RppPtr_t srcPtr, RpptGenericDescPtr srcGenericDescPtr, RppPtr_t dstPtr, RpptGenericDescPtr dstGenericDescPtr, Rpp32f *addTensor, RpptROI3DPtr roiGenericPtrSrc, RpptRoi3DType roiType, rppHandle_t rppHandle);

/*! \brief add augmentation GPU
* \details This function performs the addition operation on a batch of 4D tensors.
* It adds the corresponding element of 'addTensor' to the source tensor and stores the result in the destination tensor.
* Support added for f32 -> f32 datatype.
* \param [in] srcPtr source tensor memory
* \param [in] srcGenericDescPtr source tensor descriptor
* \param [out] dstPtr destination tensor memory
* \param [in] dstGenericDescPtr destination tensor descriptor
* \param [in] addTensor values used for addition (1D tensor of batchSize Rpp32f values)
* \param [in] roiGenericPtrSrc ROI data for each image in the source tensor (tensor of batchSize RpptRoiGeneric values)
* \param [in] roiType ROI type used (RpptRoi3DType::XYZWHD or RpptRoi3DType::LTFRBB)
* \param [in] rppHandle Hip-handle
* \returns RPP_SUCCESS <tt>\ref RppStatus</tt> on successful completion,
* RPP_ERROR otherwise.
* \ingroup group_tensor_arithmetic
*/

#ifdef GPU_SUPPORT
RppStatus rppt_add_scalar_gpu(RppPtr_t srcPtr, RpptGenericDescPtr srcGenericDescPtr, RppPtr_t dstPtr, RpptGenericDescPtr dstGenericDescPtr, Rpp32f *addTensor, RpptROI3DPtr roiGenericPtrSrc, RpptRoi3DType roiType, rppHandle_t rppHandle);
#endif // GPU_SUPPORT

/******************** subtract_scalar ********************/

/*! \brief subtract augmentation HOST
* \details This function performs the subtraction operation on a batch of 4D tensors.
* It subtracts the corresponding element of 'subtractTensor' from the source tensor and stores the result in the destination tensor.
* Support added for f32 -> f32 datatype.
* \param [in] srcPtr source tensor memory
* \param [in] srcGenericDescPtr source tensor descriptor
* \param [out] dstPtr destination tensor memory
* \param [in] dstGenericDescPtr destination tensor descriptor
* \param [in] subtractTensor values used for subtraction (1D tensor of batchSize Rpp32f values)
* \param [in] roiGenericPtrSrc ROI data for each image in the source tensor (tensor of batchSize RpptRoiGeneric values)
* \param [in] roiType ROI type used (RpptRoi3DType::XYZWHD or RpptRoi3DType::LTFRBB)
* \param [in] rppHandle Host-handle
* \returns RPP_SUCCESS <tt>\ref RppStatus</tt> on successful completion,
* RPP_ERROR otherwise.
* \ingroup group_tensor_arithmetic
*/

RppStatus rppt_subtract_scalar_host(RppPtr_t srcPtr, RpptGenericDescPtr srcGenericDescPtr, RppPtr_t dstPtr, RpptGenericDescPtr dstGenericDescPtr, Rpp32f *subtractTensor, RpptROI3DPtr roiGenericPtrSrc, RpptRoi3DType roiType, rppHandle_t rppHandle);

/*! \brief subtract augmentation GPU
* \details This function performs the subtraction operation on a batch of 4D tensors.
* It subtracts the corresponding element of 'subtractTensor' from the source tensor and stores the result in the destination tensor.
* Support added for f32 -> f32 datatype.
* \param [in] srcPtr source tensor memory
* \param [in] srcGenericDescPtr source tensor descriptor
* \param [out] dstPtr destination tensor memory
* \param [in] dstGenericDescPtr destination tensor descriptor
* \param [in] subtractTensor values used for subtraction (1D tensor of batchSize Rpp32f values)
* \param [in] roiGenericPtrSrc ROI data for each image in the source tensor (tensor of batchSize RpptRoiGeneric values)
* \param [in] roiType ROI type used (RpptRoi3DType::XYZWHD or RpptRoi3DType::LTFRBB)
* \param [in] rppHandle Hip-handle
* \returns RPP_SUCCESS <tt>\ref RppStatus</tt> on successful completion,
* RPP_ERROR otherwise.
* \ingroup group_tensor_arithmetic
*/

#ifdef GPU_SUPPORT
RppStatus rppt_subtract_scalar_gpu(RppPtr_t srcPtr, RpptGenericDescPtr srcGenericDescPtr, RppPtr_t dstPtr, RpptGenericDescPtr dstGenericDescPtr, Rpp32f *subtractTensor, RpptROI3DPtr roiGenericPtrSrc, RpptRoi3DType roiType, rppHandle_t rppHandle);
#endif // GPU_SUPPORT

#ifdef __cplusplus
}
#endif
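For context, a minimal host-side usage sketch of the new entry points. The descriptor and ROI field names below follow rppdefs.h as understood here, and the handle-creation call is an assumption; verify both against your RPP version.

#include <vector>
#include "rpp.h"

int main()
{
    // Hypothetical 1x1x4x8x8 NCDHW f32 batch; field names assumed from rppdefs.h.
    RpptGenericDesc desc;
    desc.dataType = RpptDataType::F32;
    desc.layout = RpptLayout::NCDHW;
    desc.numDims = 5;                                       // N, C, D, H, W
    desc.offsetInBytes = 0;
    desc.dims[0] = 1; desc.dims[1] = 1;
    desc.dims[2] = 4; desc.dims[3] = 8; desc.dims[4] = 8;
    desc.strides[4] = 1;
    desc.strides[3] = desc.dims[4];
    desc.strides[2] = desc.dims[3] * desc.strides[3];
    desc.strides[1] = desc.dims[2] * desc.strides[2];
    desc.strides[0] = desc.dims[1] * desc.strides[1];

    std::vector<Rpp32f> src(desc.strides[0], 1.0f), dst(desc.strides[0]);
    Rpp32f addParam[1] = {10.0f};                           // one scalar per image in the batch

    RpptROI3D roi;                                          // full-volume ROI in XYZWHD form
    roi.xyzwhdROI = {{0, 0, 0}, (Rpp32s)desc.dims[4], (Rpp32s)desc.dims[3], (Rpp32s)desc.dims[2]};

    rppHandle_t handle;
    rppCreateWithBatchSize(&handle, desc.dims[0]);          // assumption: host-handle creation API
    rppt_add_scalar_host(src.data(), &desc, dst.data(), &desc, addParam,
                         &roi, RpptRoi3DType::XYZWHD, handle);
    // rppt_subtract_scalar_host takes the same argument list with a subtractTensor instead.
    rppDestroyHost(handle);
    return 0;
}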
12 changes: 12 additions & 0 deletions src/include/cpu/rpp_cpu_common.hpp
@@ -2427,6 +2427,18 @@ inline RppStatus custom_convolve_image_host(T* srcPtr, RppiSize srcSize, U* dstP

// Compute Functions for RPP Tensor API

inline void compute_subtract_16_host(__m256 *p, __m256 *pSubtractParam)
{
p[0] = _mm256_sub_ps(p[0], pSubtractParam[0]); // subtract adjustment
p[1] = _mm256_sub_ps(p[1], pSubtractParam[0]); // subtract adjustment
}

inline void compute_add_16_host(__m256 *p, __m256 *pAddParam)
{
p[0] = _mm256_add_ps(p[0], pAddParam[0]); // add adjustment
p[1] = _mm256_add_ps(p[1], pAddParam[0]); // add adjustment
}

inline void compute_rmn_24_host(__m256 *p, __m256 *pRMNParams)
{
p[0] = _mm256_mul_ps(_mm256_sub_ps(p[0], pRMNParams[0]), pRMNParams[1]);
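Each __m256 register packs 8 floats, so the p[0]/p[1] pair in the two new helpers covers 16 consecutive elements per call, with both registers sharing the single broadcast parameter. A scalar reference equivalent of compute_add_16_host, for illustration only (not part of the patch):

// Scalar reference for compute_add_16_host: adds the per-image scalar
// to 16 consecutive floats (compute_subtract_16_host is the same with -=).
inline void compute_add_16_scalar_ref(float *p, float addParam)
{
    for (int i = 0; i < 16; i++)
        p[i] += addParam;
}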
2 changes: 2 additions & 0 deletions src/modules/cpu/host_tensor_arithmetic_operations.hpp
@@ -24,5 +24,7 @@ THE SOFTWARE.
#define HOST_TENSOR_ARITHMETIC_OPERATIONS_HPP

#include "kernel/fused_multiply_add_scalar.hpp"
#include "kernel/add_scalar.hpp"
#include "kernel/subtract_scalar.hpp"

#endif // HOST_TENSOR_ARITHMETIC_OPERATIONS_HPP
154 changes: 154 additions & 0 deletions src/modules/cpu/kernel/add_scalar.hpp
@@ -0,0 +1,154 @@
/*
Copyright (c) 2019 - 2023 Advanced Micro Devices, Inc. All rights reserved.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/

#include "rppdefs.h"
#include "rpp_cpu_simd.hpp"
#include "rpp_cpu_common.hpp"

RppStatus add_scalar_f32_f32_host_tensor(Rpp32f *srcPtr,
RpptGenericDescPtr srcGenericDescPtr,
Rpp32f *dstPtr,
RpptGenericDescPtr dstGenericDescPtr,
Rpp32f *addTensor,
RpptROI3DPtr roiGenericPtrSrc,
RpptRoi3DType roiType,
RppLayoutParams layoutParams,
rpp::Handle& handle)
{
RpptROI3D roiDefault;
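// Default ROI spans the full volume. For NCDHW, dims = [N, C, D, H, W], so
// width = dims[4], height = dims[3], depth = dims[2]; for NDHWC, dims = [N, D, H, W, C].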
if(srcGenericDescPtr->layout==RpptLayout::NCDHW)
roiDefault = {0, 0, 0, (Rpp32s)srcGenericDescPtr->dims[4], (Rpp32s)srcGenericDescPtr->dims[3], (Rpp32s)srcGenericDescPtr->dims[2]};
else if(srcGenericDescPtr->layout==RpptLayout::NDHWC)
roiDefault = {0, 0, 0, (Rpp32s)srcGenericDescPtr->dims[3], (Rpp32s)srcGenericDescPtr->dims[2], (Rpp32s)srcGenericDescPtr->dims[1]};
Rpp32u numThreads = handle.GetNumThreads();

omp_set_dynamic(0);
#pragma omp parallel for num_threads(numThreads)
for(int batchCount = 0; batchCount < dstGenericDescPtr->dims[0]; batchCount++)
{
RpptROI3D roi;
RpptROI3DPtr roiPtrInput = &roiGenericPtrSrc[batchCount];
compute_roi3D_validation_host(roiPtrInput, &roi, &roiDefault, roiType);

Rpp32f *srcPtrImage, *dstPtrImage;
srcPtrImage = srcPtr + batchCount * srcGenericDescPtr->strides[0];
dstPtrImage = dstPtr + batchCount * dstGenericDescPtr->strides[0];

Rpp32f addParam = addTensor[batchCount];
Rpp32f *srcPtrChannel, *dstPtrChannel;
dstPtrChannel = dstPtrImage;

Rpp32u vectorIncrement = 16;
Rpp32u bufferLength = roi.xyzwhdROI.roiWidth * layoutParams.bufferMultiplier;
Rpp32u alignedLength = (bufferLength / vectorIncrement) * vectorIncrement;
__m256 pAddParam = _mm256_set1_ps(addParam);
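// The loops below walk the ROI row by row: the SIMD path consumes alignedLength
// elements at 16 floats per iteration (two __m256 registers), and the scalar tail
// handles the remaining (bufferLength - alignedLength) elements. For example,
// bufferLength = 100 gives alignedLength = 96: six SIMD iterations, then four scalar ones.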

// Add without fused output-layout toggle (NCDHW -> NCDHW)
if((srcGenericDescPtr->layout == RpptLayout::NCDHW) && (dstGenericDescPtr->layout == RpptLayout::NCDHW))
{
srcPtrChannel = srcPtrImage + (roi.xyzwhdROI.xyz.z * srcGenericDescPtr->strides[2]) + (roi.xyzwhdROI.xyz.y * srcGenericDescPtr->strides[3]) + (roi.xyzwhdROI.xyz.x * layoutParams.bufferMultiplier);

for(int c = 0; c < layoutParams.channelParam; c++)
{
Rpp32f *srcPtrDepth, *dstPtrDepth;
srcPtrDepth = srcPtrChannel;
dstPtrDepth = dstPtrChannel;
for(int i = 0; i < roi.xyzwhdROI.roiDepth; i++)
{
Rpp32f *srcPtrRow, *dstPtrRow;
srcPtrRow = srcPtrDepth;
dstPtrRow = dstPtrDepth;
for(int j = 0; j < roi.xyzwhdROI.roiHeight; j++)
{
Rpp32f *srcPtrTemp, *dstPtrTemp;
srcPtrTemp = srcPtrRow;
dstPtrTemp = dstPtrRow;
int vectorLoopCount = 0;
for (; vectorLoopCount < alignedLength; vectorLoopCount += vectorIncrement)
{
__m256 p[2];
rpp_simd_load(rpp_load16_f32_to_f32_avx, srcPtrTemp, p); // simd loads
compute_add_16_host(p, &pAddParam); // Add adjustment
rpp_simd_store(rpp_store16_f32_to_f32_avx, dstPtrTemp, p); // simd stores
srcPtrTemp += vectorIncrement;
dstPtrTemp += vectorIncrement;
}
for (; vectorLoopCount < bufferLength; vectorLoopCount++)
{
*dstPtrTemp = *srcPtrTemp + addParam;
srcPtrTemp++;
dstPtrTemp++;
}
srcPtrRow += srcGenericDescPtr->strides[3];
dstPtrRow += dstGenericDescPtr->strides[3];
}
srcPtrDepth += srcGenericDescPtr->strides[2];
dstPtrDepth += dstGenericDescPtr->strides[2];
}
srcPtrChannel += srcGenericDescPtr->strides[1];
dstPtrChannel += dstGenericDescPtr->strides[1];
}
}
// Add without fused output-layout toggle (NDHWC -> NDHWC)
else if((srcGenericDescPtr->layout == RpptLayout::NDHWC) && (dstGenericDescPtr->layout == RpptLayout::NDHWC))
{
srcPtrChannel = srcPtrImage + (roi.xyzwhdROI.xyz.z * srcGenericDescPtr->strides[1]) + (roi.xyzwhdROI.xyz.y * srcGenericDescPtr->strides[2]) + (roi.xyzwhdROI.xyz.x * layoutParams.bufferMultiplier);
Rpp32f *srcPtrDepth = srcPtrChannel;
Rpp32f *dstPtrDepth = dstPtrChannel;
for(int i = 0; i < roi.xyzwhdROI.roiDepth; i++)
{
Rpp32f *srcPtrRow, *dstPtrRow;
srcPtrRow = srcPtrDepth;
dstPtrRow = dstPtrDepth;
for(int j = 0; j < roi.xyzwhdROI.roiHeight; j++)
{
Rpp32f *srcPtrTemp, *dstPtrTemp;
srcPtrTemp = srcPtrRow;
dstPtrTemp = dstPtrRow;

int vectorLoopCount = 0;
for (; vectorLoopCount < alignedLength; vectorLoopCount += vectorIncrement)
{
__m256 p[2];
rpp_simd_load(rpp_load16_f32_to_f32_avx, srcPtrTemp, p); // simd loads
compute_add_16_host(p, &pAddParam); // Add adjustment
rpp_simd_store(rpp_store16_f32_to_f32_avx, dstPtrTemp, p); // simd stores
srcPtrTemp += vectorIncrement;
dstPtrTemp += vectorIncrement;
}
for (; vectorLoopCount < bufferLength; vectorLoopCount++)
{
*dstPtrTemp = *srcPtrTemp + addParam;
srcPtrTemp++;
dstPtrTemp++;
}
srcPtrRow += srcGenericDescPtr->strides[2];
dstPtrRow += dstGenericDescPtr->strides[2];
}
srcPtrDepth += srcGenericDescPtr->strides[1];
dstPtrDepth += dstGenericDescPtr->strides[1];
}
}
}

return RPP_SUCCESS;
}
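The companion kernel in subtract_scalar.hpp mirrors this structure, substituting compute_subtract_16_host in the SIMD path and *srcPtrTemp - subtractParam in the scalar tail.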