Merge branch 'develop' into pten/upgrade_infermeta_design
chenwhql committed Jan 25, 2022
2 parents 7bf344c + 6ca4916, commit 2bcf72d
Showing 139 changed files with 4,390 additions and 2,671 deletions.
5 changes: 3 additions & 2 deletions cmake/inference_lib.cmake
@@ -189,6 +189,7 @@ copy(inference_lib_dist
copy_part_of_thrid_party(inference_lib_dist ${PADDLE_INFERENCE_INSTALL_DIR})

set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid")

if(WIN32)
if(WITH_STATIC_LIB)
set(paddle_inference_lib $<TARGET_FILE_DIR:paddle_inference>/libpaddle_inference.lib
@@ -304,7 +305,7 @@ copy(fluid_lib_dist
)

set(module "platform")
set(platform_lib_deps profiler_proto error_codes_proto)
set(platform_lib_deps profiler_proto errors)
if(WITH_GPU)
set(platform_lib_deps ${platform_lib_deps} external_error_proto)
endif(WITH_GPU)
@@ -317,7 +318,7 @@ copy(fluid_lib_dist

set(module "string")
copy(fluid_lib_dist
SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/tinyformat/*.h
SRCS ${PADDLE_SOURCE_DIR}/paddle/utils/${module}/*.h ${PADDLE_SOURCE_DIR}/paddle/utils/${module}/tinyformat/*.h
DSTS ${dst_dir}/${module} ${dst_dir}/${module}/tinyformat
)

1 change: 1 addition & 0 deletions paddle/CMakeLists.txt
@@ -1,3 +1,4 @@
add_subdirectory(utils)
add_subdirectory(scripts)
add_subdirectory(testing)
set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests CACHE INTERNAL "python tests directory")
1 change: 0 additions & 1 deletion paddle/fluid/CMakeLists.txt
@@ -4,7 +4,6 @@ add_subdirectory(distributed)
add_subdirectory(framework)
add_subdirectory(imperative)
add_subdirectory(operators)
add_subdirectory(string)
add_subdirectory(pybind)
add_subdirectory(eager)
# NOTE: please add subdirectory inference at last.
2 changes: 1 addition & 1 deletion paddle/fluid/framework/data_type.h
@@ -20,9 +20,9 @@ limitations under the License. */
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/platform/bfloat16.h"
#include "paddle/fluid/platform/complex.h"
#include "paddle/fluid/platform/eigen_ext.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/pten/kernels/funcs/eigen/extensions.h"

namespace paddle {
namespace framework {
12 changes: 8 additions & 4 deletions paddle/fluid/framework/expect.h
@@ -19,14 +19,18 @@
#define _LINUX
#endif

#ifdef _LINUX
#ifndef likely
#define likely(x) __builtin_expect((x), 1)
#ifdef _LINUX
#define likely(expr) (__builtin_expect(!!(expr), 1))
#else
#define likely(expr) (expr)
#endif
#endif

#ifdef _LINUX
#ifndef unlikely
#define unlikely(x) __builtin_expect((x), 0)
#ifdef _LINUX
#define unlikely(expr) (__builtin_expect(!!(expr), 0))
#else
#define unlikely(expr) (expr)
#endif
#endif
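
The hunk above makes the likely/unlikely macros portable: on Linux they expand to __builtin_expect branch hints, everywhere else they fall back to the bare expression. A minimal, self-contained sketch of the behaviour (the definitions mirror the new expect.h; ParsePositive and main are hypothetical and not part of the commit):

#include <cstdio>

#if defined(__linux__)
#define likely(expr) (__builtin_expect(!!(expr), 1))
#define unlikely(expr) (__builtin_expect(!!(expr), 0))
#else
#define likely(expr) (expr)
#define unlikely(expr) (expr)
#endif

// Hypothetical caller: the error branch is hinted as cold on Linux and
// compiles to a plain condition on other platforms.
int ParsePositive(int value) {
  if (unlikely(value <= 0)) {
    std::fprintf(stderr, "value must be positive\n");
    return -1;
  }
  return value;
}

int main() { return ParsePositive(3) == 3 ? 0 : 1; }

The !!(expr) in the new definitions also normalises non-boolean arguments, so an expression such as a raw pointer works cleanly with the hint.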
8 changes: 4 additions & 4 deletions paddle/fluid/framework/ir/graph_helper.cc
@@ -603,9 +603,9 @@ static std::vector<std::vector<ir::Node::Dep>> GetOpDependencies(
for (const auto *op_desc : block_ops) {
size_t op_idx = op_id_to_idx.size();
PADDLE_ENFORCE_EQ(
op_id_to_idx.emplace(op_desc->Id(), op_idx).second, true,
op_id_to_idx.emplace(op_desc->OriginalId(), op_idx).second, true,
platform::errors::InvalidArgument(
"There should not be duplicate op id: %d", op_desc->Id()));
"There should not be duplicate op id: %d", op_desc->OriginalId()));
}

std::vector<std::vector<ir::Node::Dep>> dep_matrix(op_num);
@@ -624,9 +624,9 @@ static std::vector<std::vector<ir::Node::Dep>> GetOpDependencies(

for (const auto &pair : all_preceding_ops) {
const auto *cur_op_node = pair.first;
size_t op_idx_1 = get_op_idx_by_id(cur_op_node->Op()->Id());
size_t op_idx_1 = get_op_idx_by_id(cur_op_node->Op()->OriginalId());
for (const auto *preceding_op_node : pair.second) {
size_t op_idx_2 = get_op_idx_by_id(preceding_op_node->Op()->Id());
size_t op_idx_2 = get_op_idx_by_id(preceding_op_node->Op()->OriginalId());
dep_matrix[op_idx_1][op_idx_2] = ir::Node::Dep::kAfter;
dep_matrix[op_idx_2][op_idx_1] = ir::Node::Dep::kBefore;
}
6 changes: 0 additions & 6 deletions paddle/fluid/framework/op_call_stack.h
@@ -19,12 +19,6 @@ limitations under the License. */
#include "paddle/fluid/framework/type_defs.h"
#include "paddle/fluid/platform/enforce.h"

namespace paddle {
namespace platform {
struct EnforceNotMet;
} // namespace platform
} // namespace paddle

namespace paddle {
namespace framework {

6 changes: 0 additions & 6 deletions paddle/fluid/framework/op_proto_maker_test.cc
@@ -18,12 +18,6 @@ limitations under the License. */
#include "gtest/gtest-test-part.h"
#include "gtest/gtest.h"

namespace paddle {
namespace platform {
struct EnforceNotMet;
} // namespace platform
} // namespace paddle

class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
public:
void Make() {
10 changes: 1 addition & 9 deletions paddle/fluid/framework/type_defs.h
@@ -21,8 +21,8 @@ limitations under the License. */
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/imperative/type_defs.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/pten/core/type_defs.h"
#include "paddle/utils/small_vector.h"

namespace paddle {
@@ -39,14 +39,6 @@ class InferNoNeedBufferVarsFN;
using VariableNameMap = std::map<std::string, std::vector<std::string>>;
using VariableValueMap = std::map<std::string, std::vector<Variable*>>;

// The order should be as same as framework.proto
using Attribute = boost::variant<
boost::blank, int, float, std::string, std::vector<int>, std::vector<float>,
std::vector<std::string>, bool, std::vector<bool>, BlockDesc*, int64_t,
std::vector<BlockDesc*>, std::vector<int64_t>, std::vector<double>>;

using AttributeMap = std::unordered_map<std::string, Attribute>;

#ifdef PADDLE_WITH_ASCEND_CL
using NPUAttribute =
boost::variant<boost::blank, int, float, std::string, std::vector<int>,
4 changes: 4 additions & 0 deletions paddle/fluid/imperative/prepared_operator.cc
@@ -369,6 +369,10 @@ static void BuildDygraphPtenKernelContext(
size_t end_idx = start_idx + outs_vector.size();

for (size_t offset = 0; offset < outs_vector.size(); ++offset) {
if (outs_vector[offset] == nullptr) {
kernel_ctx->EmplaceBackOutputWithoutSetRange({nullptr});
continue;
}
auto* var = outs_vector[offset]->MutableVar();
framework::Tensor* tensor_out = nullptr;
if (var->template IsType<framework::LoDTensor>()) {
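
The four lines added to BuildDygraphPtenKernelContext keep a positional slot for optional outputs that the caller did not provide, instead of dereferencing a null pointer. A standalone sketch of the same pattern under hypothetical types (only EmplaceBackOutputWithoutSetRange in the diff is the real API; everything below is illustrative):

#include <cstdio>
#include <memory>
#include <vector>

struct Tensor { float value = 0.f; };  // stand-in for the framework tensor

int main() {
  // Hypothetical op with three outputs; the middle one is optional and absent.
  std::vector<std::shared_ptr<Tensor>> outs = {
      std::make_shared<Tensor>(), nullptr, std::make_shared<Tensor>()};

  // Mirrors the loop above: a missing output still occupies a slot, so later
  // code can keep addressing outputs by their original position.
  std::vector<Tensor*> kernel_outputs;
  for (const auto& out : outs) {
    if (out == nullptr) {
      kernel_outputs.push_back(nullptr);  // placeholder slot
      continue;
    }
    kernel_outputs.push_back(out.get());
  }

  std::printf("slots kept: %zu\n", kernel_outputs.size());  // prints 3
  return 0;
}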
45 changes: 1 addition & 44 deletions paddle/fluid/imperative/type_defs.h
@@ -13,47 +13,4 @@ See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <map>
#include <memory>
#include <string>
#include <vector>

namespace paddle {
namespace imperative {

class VariableWrapper;
class SavedVariableWrapperList;
class VarBase;
class OpBase;
class GradOpNode;
class Tracer;

using WeakNameVarBaseMap =
std::map<std::string, std::vector<std::weak_ptr<VarBase>>>;

namespace details {
template <typename T>
struct NameVarMapTrait {};

template <>
struct NameVarMapTrait<VarBase> {
using Type = std::map<std::string, std::vector<std::shared_ptr<VarBase>>>;
};

template <>
struct NameVarMapTrait<VariableWrapper> {
using Type = std::map<std::string, SavedVariableWrapperList>;
};
} // namespace details

template <typename T>
using NameVarMap = typename details::NameVarMapTrait<T>::Type;

using NameVarBaseMap = NameVarMap<VarBase>;
using NameVariableWrapperMap = NameVarMap<VariableWrapper>;

using VariableWrapperList = std::vector<std::shared_ptr<VariableWrapper>>;

} // namespace imperative
} // namespace paddle
#include "paddle/pten/core/type_defs.h"
5 changes: 3 additions & 2 deletions paddle/fluid/inference/CMakeLists.txt
@@ -36,6 +36,7 @@ endif()
# fluid_modules exclude API-interface of inference/api and inference/capi_exp
get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
get_property(pten_modules GLOBAL PROPERTY PTEN_MODULES)
set(utils_modules stringpiece pretty_log string_helper)

add_subdirectory(api)

@@ -46,9 +47,9 @@ set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor
analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
#TODO(wilber, T8T9): Do we still need to support windows gpu static library?
if(WIN32 AND WITH_GPU)
cc_library(paddle_inference DEPS ${fluid_modules} ${pten_modules} ${STATIC_INFERENCE_API})
cc_library(paddle_inference DEPS ${fluid_modules} ${pten_modules} ${STATIC_INFERENCE_API} ${utils_modules})
else()
create_static_lib(paddle_inference ${fluid_modules} ${pten_modules} ${STATIC_INFERENCE_API})
create_static_lib(paddle_inference ${fluid_modules} ${pten_modules} ${STATIC_INFERENCE_API} ${utils_modules})
if(WITH_IPU)
target_link_libraries(paddle_inference -Wl,--allow-multiple-definition popart_canonicalization_utils)
endif()
1 change: 0 additions & 1 deletion paddle/fluid/operators/CMakeLists.txt
@@ -8,7 +8,6 @@ set(pybind_file_final ${PADDLE_BINARY_DIR}/paddle/fluid/pybind/pybind.h)
file(WRITE ${pybind_file} "// Generated by the paddle/fluid/operators/CMakeLists.txt. DO NOT EDIT!\n\n")

add_subdirectory(math)
add_subdirectory(eigen)
add_subdirectory(controlflow)
add_subdirectory(detection)
add_subdirectory(elementwise)
59 changes: 25 additions & 34 deletions paddle/fluid/operators/controlflow/compare_op.h
@@ -22,49 +22,40 @@ limitations under the License. */
namespace paddle {
namespace operators {

template <typename T>
struct LessThanFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T a, const T b) const { return a < b; }
};

template <typename T>
struct LessEqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T a, const T b) const { return a <= b; }
};

template <typename T>
struct GreaterThanFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T a, const T b) const { return a > b; }
};

template <typename T>
struct GreaterEqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T a, const T b) const { return a >= b; }
};

template <typename T>
#define COMPARE_FUNCTOR(func_name, op) \
template <typename InT, typename OutT = bool> \
struct func_name { \
using ELEM_TYPE = InT; \
HOSTDEVICE OutT operator()(const InT a, const InT b) const { \
return static_cast<OutT>(a op b); \
} \
};

COMPARE_FUNCTOR(LessThanFunctor, <)
COMPARE_FUNCTOR(LessEqualFunctor, <=)
COMPARE_FUNCTOR(GreaterThanFunctor, >)
COMPARE_FUNCTOR(GreaterEqualFunctor, >=)
#undef COMPARE_FUNCTOR

template <typename InT, typename OutT = bool>
struct EqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T a, const T b) const {
if (std::is_floating_point<T>::value) {
using ELEM_TYPE = InT;
HOSTDEVICE OutT operator()(const InT a, const InT b) const {
if (std::is_floating_point<InT>::value) {
// This branch will be optimized while compiling if T is integer. It is
// safe to cast a and b to double.
return fabs(static_cast<double>(a - b)) < 1e-8;
return static_cast<OutT>(fabs(static_cast<double>(a - b)) < 1e-8);
} else {
return (a == b);
return static_cast<OutT>(a == b);
}
}
};

template <typename T>
template <typename InT, typename OutT = bool>
struct NotEqualFunctor {
using ELEM_TYPE = T;
HOSTDEVICE bool operator()(const T a, const T b) const {
return !EqualFunctor<T>()(a, b);
using ELEM_TYPE = InT;
HOSTDEVICE bool operator()(const InT a, const InT b) const {
return !EqualFunctor<InT, OutT>()(a, b);
}
};

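
The COMPARE_FUNCTOR macro collapses the four ordering functors into one definition, and together with the reworked EqualFunctor/NotEqualFunctor it lets the output element type be something other than bool. A self-contained sketch of what COMPARE_FUNCTOR(LessThanFunctor, <) expands to and how such a functor is applied element-wise (HOSTDEVICE is dropped and ElementwiseCompare is a hypothetical helper, not the kernel code touched by this commit):

#include <cstddef>
#include <vector>

// Expansion of COMPARE_FUNCTOR(LessThanFunctor, <) with HOSTDEVICE removed.
template <typename InT, typename OutT = bool>
struct LessThanFunctor {
  using ELEM_TYPE = InT;
  OutT operator()(const InT a, const InT b) const {
    return static_cast<OutT>(a < b);
  }
};

// Hypothetical helper: apply a binary comparison functor over two vectors.
template <typename Functor, typename InT, typename OutT>
void ElementwiseCompare(const std::vector<InT>& x, const std::vector<InT>& y,
                        std::vector<OutT>* out) {
  Functor func;
  out->resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    (*out)[i] = func(x[i], y[i]);
  }
}

int main() {
  std::vector<int> x{1, 5, 3};
  std::vector<int> y{2, 4, 3};
  std::vector<bool> result;
  ElementwiseCompare<LessThanFunctor<int>>(x, y, &result);  // true, false, false
  return 0;
}

Passing OutT explicitly, e.g. LessThanFunctor<float, int64_t>, would produce integer outputs, which is the flexibility the new OutT parameter adds.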
9 changes: 0 additions & 9 deletions paddle/fluid/operators/eigen/CMakeLists.txt

This file was deleted.
