Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions .github/PULL_REQUEST_TEMPLATE.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
<!-- Demo: https://github.com/PaddlePaddle/Paddle/pull/24810 -->
### PR types
<!-- One of [ New features | Bug fixes | Function optimization | Performance optimization | Breaking changes | Others ] -->

### PR changes
<!-- One of [ OPs | APIs | Docs | Others ] -->

### Describe
<!-- Describe what this PR does -->
27 changes: 26 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,9 @@ option(TRT_DIRECTORY "If build tensorrt backend, need to define path of tensorrt
option(ENABLE_VISION "Whether to enable vision models usage." OFF)
option(ENABLE_VISION_VISUALIZE "Whether to enable visualize vision model result toolbox." ON)
option(ENABLE_TEXT "Whether to enable text models usage." OFF)
option(WITH_TESTING "Whether to compile with unittest." OFF)
# TODO(zhoushunjie): Will remove it later.
option(ENABLE_FDTENSOR_FUNC "Whether to compile with function of FDTensor." OFF)

# Please don't enable this flag for now; some bugs still exist.
option(ENABLE_OPENCV_CUDA "Whether to enable opencv with cuda, this will allow process image with GPU." OFF)
Expand All @@ -56,6 +59,12 @@ option(ENABLE_DEBUG "Whether to enable print debug information, this may reduce
option(WITH_VISION_EXAMPLES "Whether to build fastdeploy with vision examples" OFF)
option(WITH_TEXT_EXAMPLES "Whether to build fastdeploy with text examples" OFF)

# Configure GIT_URL so dependent repos can be cloned from a GitHub mirror.
# NOTE: option() is for booleans only — a string value must be declared as a
# STRING cache variable so users can override it with -DGIT_URL=<mirror>.
set(GIT_URL "https://github.com" CACHE STRING "Git URL to clone dependent repos")
if(NOT GIT_URL)
  # Fall back to the canonical host if the user explicitly set it to empty.
  set(GIT_URL "https://github.com")
endif()

# Check for 32bit system
if(WIN32)
if(NOT CMAKE_CL_64)
Expand Down Expand Up @@ -108,13 +117,14 @@ endif()

add_definitions(-DFASTDEPLOY_LIB)
file(GLOB_RECURSE ALL_DEPLOY_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/*.cc)
file(GLOB_RECURSE FDTENSOR_FUNC_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/function/*.cc)
file(GLOB_RECURSE DEPLOY_ORT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/ort/*.cc)
file(GLOB_RECURSE DEPLOY_PADDLE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/paddle/*.cc)
file(GLOB_RECURSE DEPLOY_TRT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/backends/tensorrt/*.cpp)
file(GLOB_RECURSE DEPLOY_VISION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/vision/*.cc)
file(GLOB_RECURSE DEPLOY_TEXT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/text/*.cc)
file(GLOB_RECURSE DEPLOY_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pybind/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/*_pybind.cc)
list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS})
list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS} ${DEPLOY_TRT_SRCS} ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS} ${FDTENSOR_FUNC_SRCS})

set(DEPEND_LIBS "")

Expand Down Expand Up @@ -223,6 +233,11 @@ if(ENABLE_TEXT)
include(external/faster_tokenizer.cmake)
endif()

# TODO(zhoushunjie): temporary switch — compiles the FDTensor function sources
# (csrcs/fastdeploy/function/*.cc) into the library and exposes the
# ENABLE_FDTENSOR_FUNC preprocessor define to the C++ code.
if (ENABLE_FDTENSOR_FUNC)
add_definitions(-DENABLE_FDTENSOR_FUNC)
list(APPEND ALL_DEPLOY_SRCS ${FDTENSOR_FUNC_SRCS})
endif()

configure_file(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/core/config.h.in ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/core/config.h)
configure_file(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pybind/main.cc.in ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pybind/main.cc)
configure_file(${PROJECT_SOURCE_DIR}/FastDeploy.cmake.in ${PROJECT_SOURCE_DIR}/FastDeploy.cmake @ONLY)
Expand All @@ -231,6 +246,8 @@ configure_file(${PROJECT_SOURCE_DIR}/fastdeploy/c_lib_wrap.py.in ${PROJECT_SOURC
list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_PYBIND_SRCS})

add_library(${LIBRARY_NAME} SHARED ${ALL_DEPLOY_SRCS})
# Build-order dependency only: ensures the extern_eigen3 download/extract step
# runs before these sources (which include Eigen headers) are compiled.
# add_dependencies does not link anything; Eigen is header-only here.
add_dependencies(${LIBRARY_NAME} extern_eigen3)

redefine_file_macro(${LIBRARY_NAME})
set_target_properties(${LIBRARY_NAME} PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
if(NOT APPLE)
Expand Down Expand Up @@ -276,6 +293,14 @@ if (WITH_TEXT_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
endif()
endif()

# Unit tests: pull in gtest/gflags/glog as external projects and descend into
# tests/ only when testing is requested AND the tests directory is present
# (source releases may ship without it).
if (WITH_TESTING AND EXISTS ${PROJECT_SOURCE_DIR}/tests)
add_definitions(-DWITH_TESTING)
include(external/gtest.cmake)
include(external/gflags.cmake)
include(external/glog.cmake)
add_subdirectory(tests)
endif()

include(external/summary.cmake)
fastdeploy_summary()
if(WIN32)
Expand Down
32 changes: 32 additions & 0 deletions csrcs/fastdeploy/function/eigen.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/function/eigen.h"

#include <mutex>

namespace fastdeploy {

// Shared singleton instance; created lazily on first GetInstance() call.
std::shared_ptr<EigenDeviceWrapper> EigenDeviceWrapper::instance_ = nullptr;

// Returns the process-wide EigenDeviceWrapper singleton.
// The original check-then-create sequence was racy under concurrent callers
// (two threads could both observe nullptr and allocate); std::call_once makes
// the lazy initialization thread-safe.
std::shared_ptr<EigenDeviceWrapper> EigenDeviceWrapper::GetInstance() {
  static std::once_flag init_flag;
  std::call_once(init_flag,
                 []() { instance_ = std::make_shared<EigenDeviceWrapper>(); });
  return instance_;
}

// Accessor for the wrapped default (CPU) Eigen device. The pointer refers to
// a member of this wrapper, so it stays valid as long as the wrapper lives.
const Eigen::DefaultDevice* EigenDeviceWrapper::GetDevice() const {
  const Eigen::DefaultDevice* dev = &device_;
  return dev;
}

} // namespace fastdeploy
109 changes: 109 additions & 0 deletions csrcs/fastdeploy/function/eigen.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <algorithm>
#include <memory>
#include <vector>
#include "fastdeploy/core/fd_tensor.h"
#include "unsupported/Eigen/CXX11/Tensor"

namespace fastdeploy {
// EigenDim converts a shape vector into Eigen::DSizes for a rank-D tensor.
template <int D>
struct EigenDim {
  using Type = Eigen::DSizes<Eigen::DenseIndex, D>;

  // Copies the entries of `dims` into a DSizes value.
  // NOTE(review): assumes dims.size() == D; TODO confirm callers guarantee
  // this, since extra entries would write past the DSizes rank.
  static Type From(const std::vector<int64_t>& dims) {
    Type ret;
    // size_t index avoids the signed/unsigned comparison of the original
    // `int64_t d < dims.size()` loop condition.
    for (size_t d = 0; d < dims.size(); ++d) {
      ret[d] = dims[d];
    }
    return ret;
  }
};

// Interpret an FDTensor's raw buffer as an Eigen TensorMap (mutable or const)
// of rank D, without copying the underlying data.
template <typename T, size_t D, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
struct EigenTensor {
  using Type = Eigen::TensorMap<Eigen::Tensor<T, D, MajorType, IndexType>>;

  using ConstType =
      Eigen::TensorMap<Eigen::Tensor<const T, D, MajorType, IndexType>>;

  // Map a mutable tensor with an explicitly supplied shape.
  static Type From(FDTensor& tensor,
                   const std::vector<int64_t>& dims) {  // NOLINT
    auto* raw = reinterpret_cast<T*>(tensor.Data());
    return Type(raw, EigenDim<D>::From(dims));
  }

  // Map a mutable tensor using its own shape.
  static Type From(FDTensor& tensor) {  // NOLINT
    return From(tensor, tensor.shape);
  }

  // Map a read-only tensor with an explicitly supplied shape.
  static ConstType From(const FDTensor& tensor,
                        const std::vector<int64_t>& dims) {
    const auto* raw = reinterpret_cast<const T*>(tensor.Data());
    return ConstType(raw, EigenDim<D>::From(dims));
  }

  // Map a read-only tensor using its own shape.
  static ConstType From(const FDTensor& tensor) {
    return From(tensor, tensor.shape);
  }
};

// Interpret an FDTensor's buffer as a rank-0 (scalar) Eigen TensorMap of
// element type T, without copying.
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
struct EigenScalar {
  using Type = Eigen::TensorMap<
      Eigen::TensorFixedSize<T, Eigen::Sizes<>, MajorType, IndexType>>;
  using ConstType = Eigen::TensorMap<
      Eigen::TensorFixedSize<const T, Eigen::Sizes<>, MajorType, IndexType>>;

  // Mutable scalar view over the tensor's first element.
  static Type From(FDTensor& tensor) {  // NOLINT
    T* raw = reinterpret_cast<T*>(tensor.Data());
    return Type(raw);
  }

  // Read-only scalar view over the tensor's first element.
  static ConstType From(const FDTensor& tensor) {
    const T* raw = reinterpret_cast<const T*>(tensor.Data());
    return ConstType(raw);
  }
};

// Rank-1 specialization of EigenTensor plus a Flatten helper that views any
// tensor as a 1-D vector of Numel() elements (no copy).
template <typename T, int MajorType = Eigen::RowMajor,
          typename IndexType = Eigen::DenseIndex>
struct EigenVector : public EigenTensor<T, 1, MajorType, IndexType> {
  // Mutable flat view: shape collapses to {Numel()}.
  static typename EigenVector::Type Flatten(FDTensor& tensor) {  // NOLINT
    const std::vector<int64_t> flat_shape = {tensor.Numel()};
    return EigenVector::From(tensor, flat_shape);
  }

  // Read-only flat view: shape collapses to {Numel()}.
  static typename EigenVector::ConstType Flatten(const FDTensor& tensor) {
    const std::vector<int64_t> flat_shape = {tensor.Numel()};
    return EigenVector::From(tensor, flat_shape);
  }
};

// Process-wide singleton wrapping a default (CPU) Eigen execution device.
// Callers obtain the shared instance via GetInstance() and pass
// GetDevice()'s pointer to Eigen tensor expressions' device() API.
class EigenDeviceWrapper {
 public:
  // Returns the shared singleton instance (created lazily on first call).
  static std::shared_ptr<EigenDeviceWrapper> GetInstance();
  // Pointer to the wrapped device; valid for the wrapper's lifetime.
  const Eigen::DefaultDevice* GetDevice() const;

 private:
  Eigen::DefaultDevice device_;
  // Backing storage for the singleton returned by GetInstance().
  static std::shared_ptr<EigenDeviceWrapper> instance_;
};

} // namespace fastdeploy
Loading