pip3 install apache-tvm
pip3 install apache-tvm-cu102 -f https://tlcpack.ai/wheels
- Check TLCPack for more TVM packages
- Find out installed graphics card by
sudo lshw -C display
or
lspci | grep -i --color 'vga\|3d\|2d'
- CUDA toolkit version >= 8.0 is required
- Use
nvidia-smi
to check your version
- Use
sudo nvidia-settings
to configure NVIDIA graphics driver
- Install OpenCL development files
sudo apt install ocl-icd-opencl-dev
- Install the package for querying OpenCL information
sudo apt install clinfo
- Deploy OpenCL runtime of Intel graphics
sudo apt install intel-opencl-icd
- Check your Intel device with
clinfo
- g++ 7.1 or higher
- CMake 3.18 or higher
- LLVM 4.0 or higher for CPU code generation
sudo apt install -y llvm
- Use
llvm-config --version
to check your version
sudo apt update
sudo apt install -y python3 python3-dev python3-setuptools gcc libtinfo-dev zlib1g-dev build-essential cmake vim git
wget https://github.com/oneapi-src/oneDNN/archive/refs/tags/v2.6.tar.gz
tar xf v2.6.tar.gz
cd oneDNN-2.6/
cmake . -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=lib
make -j4
sudo make install
sudo apt install -y python3-pip
pip3 install --upgrade pip
pip3 install numpy decorator attrs
pip3 install pillow tensorflow tflite opencv-python easydict typing-extensions psutil scipy tornado cloudpickle
- Install ONNX packages:
pip3 install onnx onnxoptimizer
- Install ONNX Runtime:
pip3 install onnxruntime
for CPU, or
pip3 install onnxruntime-gpu
for CUDA
pip3 list
or
pip3 freeze
- Create requirements.txt
pip3 freeze > requirements.txt
- Install Python packages with requirements.txt
pip3 install -r requirements.txt
- Download *.tar.gz (e.g. apache-tvm-src-v0.15.0.tar.gz) from the Releases page
tar zxvf
the downloaded *.tar.gz
- Open a terminal and go to the directory containing the decompressed files
- Open a terminal
cd ~
git clone --recursive https://github.com/apache/tvm.git
cd tvm
- Switch branches (optional)
git checkout <commit>
mkdir build
cp cmake/config.cmake build
cd build
- Customize your compilation options
vi config.cmake
cmake ..
make -j4
vi ~/.bashrc
- Add the following two to ~/.bashrc
- export TVM_HOME=/path_to_your_own_TVM
- export PYTHONPATH=$TVM_HOME/python:${PYTHONPATH}
source ~/.bashrc
python3 -c "import tvm"
- Download a quantized MobileNetV2 from Kaggle Models and extract it
- Download compile_run_mobilenetv2.py and run
python3 compile_run_mobilenetv2.py
- Expected result:
Prediction=> id: 282 name: tabby
- Show parameters (weights)
print(lib.get_params())
- Show all modules generated with relay.build
print(lib.get_lib().imported_modules)
- Print host llvm code
print(lib.get_lib().imported_modules[0].get_source())
- Print device code
print(lib.get_lib().imported_modules[1].get_source())
- Return internal configuration
print(lib.get_executor_config())
- git clone https://bitbucket.org/icl/papi.git
- cd papi/src/
- ./configure --prefix=$PWD/install
- sudo sh -c "echo 2 > /proc/sys/kernel/perf_event_paranoid"
- Solves the error: "Permission level does not permit operation"
- make && make install
- cd install/bin
- ./papi_avail
- To list available metrics
- git clone --recursive https://github.com/apache/tvm.git
- cd tvm/
- mkdir build
- cd build/
- cp ../cmake/config.cmake .
- find [the directory where PAPI is cloned] -name papi.pc
- vi config.cmake to set: USE_LLVM ON
- vi config.cmake to set: USE_PAPI [the directory where papi.pc exists]
- cmake ..
- make -j4
- vi ~/.bashrc to set environment variable for TVM
- source ~/.bashrc
- git clone --recursive https://github.com/apache/incubator-tvm.git
- cd incubator-tvm
- git checkout <commit>
- git checkout -b <new_branch>
- git push --set-upstream origin <new_branch>
- git clone --recursive https://github.com/apache/incubator-tvm.git
- cd incubator-tvm
- git checkout <commit>
- git checkout -b <new_branch>
- git remote add <remote_name> <remote_URL>
- git remote -v
- git branch
- git config --global user.email <email>
- git config --list
- git push --set-upstream <remote_name> <new_branch>
- --set-upstream is equal to -u
- --set-upstream is used in the first upload
- git push <remote_name> <branch_name> for later upload
- git tag -l
- git push --set-upstream --tags <remote_name>
- cd [directory]
- git remote add [name for the hosting] [hosting's .git]
- git push -u [name for the hosting] --all
- git push -u [name for the hosting] --tags
- cd [directory]
- git init
- git remote add [name for the hosting] [hosting's .git]
- git add .
- git commit -m "Initial commit"
- git push -u [name for the hosting] master
- https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html#runfile-nouveau
- find ./ -type f -size +20M (find large files, in this example, 20MB)
- git log --graph --oneline --all (visualize git log)
- sudo apt remove cmake
- pip3 install cmake
- sudo ln /home/[account_name]/.local/bin/cmake /usr/bin/cmake
- cmake --version
- Deploy OpenCL runtime of Intel graphics
sudo apt install apt-file
sudo apt update
apt-file find libOpenCL.so
sudo add-apt-repository ppa:intel-opencl/intel-opencl
sudo apt update
sudo apt install intel-opencl-icd
- Upgrade graphics driver using Software Updater of Ubuntu
- Click on the 'Additional Drivers' tab
- Choose the latest driver provided by Ubuntu
- Some commands may need 'sudo'
- Refer to https://docs.tvm.ai/install/from_source.html
- https://note.nkmk.me/en/python-pip-list-freeze/
- https://note.nkmk.me/en/python-pip-install-requirements/
- https://tvm.apache.org/docs/install/from_source.html
- https://github.com/apache/tvm/blob/main/docker/install/ubuntu_install_dnnl.sh
- https://github.com/oneapi-src/oneDNN
- https://tvm.apache.org/docs/how_to/compile_models/from_tflite.html#sphx-glr-how-to-compile-models-from-tflite-py