Skip to content

Commit

Permalink
Browse files Browse the repository at this point in the history
* 'master' of https://github.com/apache/incubator-mxnet: (192 commits)
  * impl - FFI for np einsum (apache#17869)
  [Numpy] FFI for diag/diagonal/diag_indices_from (apache#17789)
  [Numpy] Kron operator (apache#17323)
  cmake: Set DMLC_LOG_FATAL_THROW only for building mxnet and not for tvm (apache#17878)
  Add simplified HybridBlock.forward without F (apache#17530)
  Use FP32 copy of weights for norm (multitensor LAMB optimizer) (apache#17700)
  Use multi-tensor sumSQ in clip_global_norm (apache#17652)
  [Numpy] Add op fmax, fmin, fmod (apache#17567)
  Adding sparse support to MXTensor for custom operators (apache#17569)
  Update 3rdparty/mkldnn to v1.2.2 (apache#17313)
  Dynamic subgraph compile support (apache#17623)
  Refactor cpp-package CMakeLists.txt & add missing inference/imagenet_inference (apache#17835)
  staticbuild: Fix potential user-assisted execution of arbitrary code  (apache#17860)
  * FFI for np.argmax and np.argmin (apache#17843)
  ffi for roll/rot90 (apache#17861)
  Skip test_multi_worker_dataloader_release_pool on OS X (apache#17797)
  add ffi for full_like, binary (apache#17811)
  HybridBlock.export() to return created filenames (apache#17758)
  Fix SoftReLU fused operator numerical stability (apache#17849)
  CI: Test clang10 cpu & gpu builds with -WError (apache#17830)
  ...
  • Loading branch information
anirudh2290 committed Mar 27, 2020
2 parents 65fc8be + 56e7985 commit ae654ba
Show file tree
Hide file tree
Showing 851 changed files with 45,994 additions and 11,160 deletions.
26 changes: 26 additions & 0 deletions .github/workflows/os_x_staticbuild.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,26 @@
# Continuous-build workflow: macOS x86_64 static build of the project,
# run on every push and pull request.
# NOTE(review): the scraped source had all YAML indentation stripped
# (invalid YAML); nesting reconstructed here. Step contents are unchanged.
name: continuous build

on: [push, pull_request]

jobs:
  macosx-x86_64:
    runs-on: macos-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v2
      - name: Install Dependencies
        # Build-time tools only; runtime Python deps are installed later.
        run: |
          brew install nasm automake ninja libtool cmake pkgconfig protobuf
      - name: Build project
        run: |
          git --version
          clang --version
          CMAKE_STATICBUILD=1 ./tools/staticbuild/build.sh cpu
      - name: Setup Python
        run: |
          python3 -m pip install --user nose nose-timer nose-exclude numpy scipy
          python3 -m pip install --user -e python
      - name: Test project
        # Excluded tests are known to fail or hang on the macOS CI runner.
        run: |
          python3 -m nose --with-timer --verbose tests/python/unittest/ --exclude-test=test_extensions.test_subgraph --exclude-test=test_extensions.test_custom_op --exclude-test=test_gluon_data.test_recordimage_dataset_with_data_loader_multiworker
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ docs/web-data

#dmlc
config.mk
config.cmake

*.pyc
.Rhistory
Expand Down
1 change: 0 additions & 1 deletion .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
[submodule "3rdparty/mkldnn"]
path = 3rdparty/mkldnn
url = https://github.com/intel/mkl-dnn.git
branch = master
[submodule "3rdparty/tvm"]
path = 3rdparty/tvm
url = https://github.com/apache/incubator-tvm.git
Expand Down
46 changes: 0 additions & 46 deletions .travis.yml

This file was deleted.

2 changes: 1 addition & 1 deletion 3rdparty/dlpack
2 changes: 1 addition & 1 deletion 3rdparty/mkldnn
87 changes: 84 additions & 3 deletions 3rdparty/mshadow/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,87 @@
# mshadow: header-only tensor library. It is modeled as a pure INTERFACE
# target; every definition, include path and flag below is a usage
# requirement propagated to consumers that link against `mshadow`.
# NOTE(review): the scraped source was a diff view containing both the old and
# new versions of `cmake_minimum_required` and of the `mshadow_lint` target;
# the stale duplicates are removed here (a duplicate add_custom_target is a
# hard configure error).
cmake_minimum_required(VERSION 3.13)
project(mshadow C CXX)

include(CMakeDependentOption)
option(USE_CUDA "Build with CUDA support" ON)
# NOTE(review): was `option(USE_CUDNN ON)` — malformed signature; "ON" was
# parsed as the help string, so the option silently defaulted to OFF. The
# signature is fixed and the effective OFF default preserved for
# backward compatibility.
option(USE_CUDNN "Build with CUDNN support" OFF)
# SSE makes no sense on ARM; force it OFF there regardless of the cache.
cmake_dependent_option(USE_SSE "Build with x86 SSE instruction support" ON "NOT ARM" OFF)
option(USE_F16C "Build with x86 F16C instruction support" ON) # autodetects support if ON
option(USE_INT64_TENSOR_SIZE "Use int64_t to represent the total number of elements in a tensor" OFF)
# NOTE(review): was `option(MSHADOW_IN_CXX11 ON)` — same malformed signature;
# effective OFF default preserved.
option(MSHADOW_IN_CXX11 "Enable mshadow C++11 mode" OFF)

add_library(mshadow INTERFACE)
# Headers are attached as INTERFACE sources so IDEs list them; they do not
# cause compilation on their own.
file(GLOB_RECURSE MSHADOWSOURCE "mshadow/*.h")
target_include_directories(mshadow INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}")
target_sources(mshadow INTERFACE ${MSHADOWSOURCE})
if(UNIX)
  # Warning suppressions are gated per language/compiler with generator
  # expressions so they only reach compilers that understand them.
  target_compile_options(mshadow INTERFACE
    "$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CXX_COMPILER_ID:Clang>>:-Wno-braced-scalar-init>"
    "$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CXX_COMPILER_ID:Clang>>:-Wno-pass-failed>"
    # TODO Replace Wno-unused-lambda-capture with [[maybe_unused]] annotation once requiring C++17
    "$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CXX_COMPILER_ID:Clang>>:-Wno-unused-lambda-capture>"
    # TODO Fixing the warning leads to compile error on 4.8; fix once 4.8 support is dropped
    "$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<CXX_COMPILER_ID:Clang>>:-Wno-undefined-var-template>"
    "$<$<COMPILE_LANGUAGE:CXX>:-Wno-unused-parameter>"
    "$<$<COMPILE_LANGUAGE:CXX>:-Wno-unknown-pragmas>"
    "$<$<COMPILE_LANGUAGE:CXX>:-Wno-unused-local-typedefs>"
    "$<$<COMPILE_LANGUAGE:CUDA>:--expt-relaxed-constexpr>")
endif()

if(USE_CUDA)
  enable_language(CUDA)
  file(GLOB_RECURSE MSHADOW_CUDASOURCE "mshadow/*.cuh")
  target_sources(mshadow INTERFACE ${MSHADOW_CUDASOURCE})
  target_compile_definitions(mshadow INTERFACE MSHADOW_USE_CUDA=1
                                               MSHADOW_FORCE_STREAM)
else()
  target_compile_definitions(mshadow INTERFACE MSHADOW_USE_CUDA=0)
endif()

if(USE_SSE)
  # For cross compilation, we can't rely on the compiler checks, but mshadow
  # will add platform specific includes not available in other arches
  include(CheckCXXCompilerFlag)
  check_cxx_compiler_flag("-msse3" SUPPORT_MSSE3)
  check_cxx_compiler_flag("-msse2" SUPPORT_MSSE2)
  if(SUPPORT_MSSE3)
    target_compile_definitions(mshadow INTERFACE MSHADOW_USE_SSE)
    target_compile_options(mshadow INTERFACE $<$<COMPILE_LANGUAGE:CXX>:-msse3>)
  elseif(SUPPORT_MSSE2)
    target_compile_definitions(mshadow INTERFACE MSHADOW_USE_SSE)
    target_compile_options(mshadow INTERFACE $<$<COMPILE_LANGUAGE:CXX>:-msse2>)
  else()
    target_compile_definitions(mshadow INTERFACE MSHADOW_USE_SSE=0)
  endif()
else()
  target_compile_definitions(mshadow INTERFACE MSHADOW_USE_SSE=0)
endif()

if(USE_CUDNN)
  target_compile_definitions(mshadow INTERFACE MSHADOW_USE_CUDNN)
endif()

if(MSHADOW_IN_CXX11)
  target_compile_definitions(mshadow INTERFACE MSHADOW_IN_CXX11)
endif()

if(USE_F16C)
  # Determine if hardware supports F16C instruction set; the helper script
  # sets SUPPORT_F16C.
  message(STATUS "Determining F16C support")
  include(cmake/AutoDetectF16C.cmake)
  if(SUPPORT_F16C)
    target_compile_options(mshadow INTERFACE $<$<COMPILE_LANGUAGE:CXX>:-mf16c>)
  else()
    target_compile_definitions(mshadow INTERFACE MSHADOW_USE_F16C=0)
  endif()
else()
  target_compile_definitions(mshadow INTERFACE MSHADOW_USE_F16C=0)
endif()

if(USE_INT64_TENSOR_SIZE)
  message(STATUS "Using 64-bit integer for tensor size")
  target_compile_definitions(mshadow INTERFACE MSHADOW_INT64_TENSOR_SIZE=1)
else()
  target_compile_definitions(mshadow INTERFACE MSHADOW_INT64_TENSOR_SIZE=0)
endif()

# Lint helper target; delegates to dmlc-core's lint script using the
# modern FindPython3 module rather than the legacy PYTHON_EXECUTABLE.
set(mshadow_LINT_DIRS mshadow mshadow-ps)
find_package(Python3)
add_custom_target(mshadow_lint COMMAND ${CMAKE_COMMAND} -DMSVC=${MSVC}
  -DPYTHON_EXECUTABLE=${Python3_EXECUTABLE} -DLINT_DIRS=${mshadow_LINT_DIRS}
  -DPROJECT_SOURCE_DIR=${PROJECT_SOURCE_DIR} -DPROJECT_NAME=mshadow
  -P ${PROJECT_SOURCE_DIR}/../dmlc-core/cmake/lint.cmake)
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,11 @@ if(AUTO_DETECT_F16_CMAKE_INCLUDED)
return()
endif()
set(AUTO_DETECT_F16_CMAKE_INCLUDED True)

# Default to "not supported"; later checks may flip this to True.
set(SUPPORT_F16C False)
if(ANDROID)
  # Fixed typo in the user-facing message ("Andriod" -> "Android").
  message("F16C instruction set is not yet supported for Android")
  return()
endif()
if(MSVC)
message("F16C instruction set is not yet supported for MSVC")
return()
Expand Down
Loading

0 comments on commit ae654ba

Please sign in to comment.