* multi device develop
* change build.sh
* cuda tools move into kernels
* multi device develop
* multi device develop
* multi device develop
* solve multi device develop
* solve arm compile error
* arm print_vec
* fix regress error
* solve cuda compile error
* solve multi device print vec
* split node class with its derived class
* lightseq x86 unit test
* add x86 unit test
* Canonical Namespace
* add pybind compile
* Lsflow develop (#463)
* Fix new arch context build check (#441). Problem: LinearOp::forward gets the cublas handle without checking whether the context is built, while LinearOp::backward checks first. Solution: modify LinearOp::forward to check that the context is built before getting the cublas handle.
* fix config reference bug (#453)
* developing lsflow
* add split_head op and its test (#454)
* lsflow develop
* format note message
* lsflow tune
* add notes for context class
* add note for lsflow
* op example
* Update CODEOWNERS (#457)
* Gpt infer (#456)
* add split head for beam search
* alter checkin
* make launch_transform_0213 more clear (#459)
* fix operator compile
* make launch_transform_0213 more clear (#460)
* change max_shape to max_shape_size
* correct bias shape in split_head (#461)
* add unit test for x86 cpu kernel

---------

Co-authored-by: Kangmo Kim <kangmo.kim@gmail.com>
Co-authored-by: Ying Xiong <xiongying.taka@bytedance.com>
Co-authored-by: Xiaohui Wang <wangxiaohui.neo@bytedance.com>

* fix sys.path
* fix sys.path (#466)
* jit build support pure cpu machine
* robust builder for x86 and cuda
* fix compile error
* develop test_ls_layer
* format
* fix training compile problem
* add mkl gemm for f32 and s8 (#470)
* test for encoder layer (#471)
* test encoder layer
* fix cuda free error
* lightseq multi device develop
* fix crf op error
* avoid import training directories
* fix strided_batch_gemm config data type
* add debug message
* convert shape from vector<int> to vector<size_t>
* change debug log format
* format
* remove useless dropout
* add sdpa layer into multi head attention layer
* fix conflict parameter: is_post_ln and pre_or_po...
* fix CMakeLists.txt compile
* multi kernel develop
* lightseq transformer.cu fix
* fix linear col/row major
* fix compile error
* fix lightseq post_ln network structure
* add transformer example & print error message
* add shape message
* format
* fix concat error
* add shape message
* fix beam search bug

---------

Co-authored-by: Kangmo Kim <kangmo.kim@gmail.com>
Co-authored-by: Ying Xiong <xiongying.taka@bytedance.com>
Co-authored-by: Xiaohui Wang <wangxiaohui.neo@bytedance.com>
Parent: 6bec6dd
Commit: 2ead283
Showing 245 changed files with 15,178 additions and 3,211 deletions.
build.sh:
@@ -1,5 +1,7 @@
if [ ! -d 'build' ]; then
  mkdir build
fi

cd build && cmake -DUSE_NEW_ARCH=OFF -DUSE_TRITONBACKEND=ON -DDEBUG_MODE=OFF -DFP16_MODE=ON -DMEM_DEBUG=OFF .. && make -j${nproc}
# DEVICE_ARCH could be cuda/x86/arm
cd build && cmake -DUSE_NEW_ARCH=ON -DDEVICE_ARCH=cuda -DUSE_TRITONBACKEND=OFF -DDEBUG_MODE=ON -DFP16_MODE=OFF -DMEM_DEBUG=OFF .. && make -j${nproc}
# you can use command like below to compile lightseq with pybind interface:
# sudo PATH=$PATH:/usr/local/hdf5 CUDACXX=/usr/local/cuda/bin/nvcc DEVICE_ARCH=x86 ENABLE_FP32=1 ENABLE_DEBUG=0 ENABLE_NEW_ARCH=1 python3 setup.py install
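
The second cmake invocation is the new-architecture build introduced by this commit: -DUSE_NEW_ARCH=ON plus -DDEVICE_ARCH to pick the kernel backend (cuda, x86, or arm). The commented setup.py command shows the equivalent environment-variable switches (DEVICE_ARCH, ENABLE_FP32, ENABLE_DEBUG, ENABLE_NEW_ARCH) for building the pybind interface instead.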
Transformer inference example (new file):
@@ -0,0 +1,91 @@
#include "model_base.h" | ||
#include "util.h" | ||
|
||
/** | ||
@file | ||
Example of how to run transformer inference using our implementation. | ||
*/ | ||
|
||
int main(int argc, char* argv[]) { | ||
std::string model_weights_path = argv[1]; | ||
|
||
std::vector<int> example_input = {63, 47, 65, 1507, 88, 74, | ||
10, 2057, 362, 9, 284, 6}; | ||
int eg_seq_len = example_input.size(); | ||
int batch_size = 1, batch_seq_len = example_input.size(); | ||
if (argc == 4) { | ||
batch_size = atoi(argv[2]); | ||
batch_seq_len = atoi(argv[3]); | ||
} | ||
|
||
int max_batch_size = std::max(4, batch_size); | ||
std::vector<int> host_input; | ||
for (int i = 0; i < batch_size; ++i) { | ||
for (int j = 0; j < batch_seq_len; ++j) { | ||
host_input.push_back(example_input[j % eg_seq_len]); | ||
} | ||
} | ||
|
||
auto model = lightseq::LSModelFactory::GetInstance().CreateModel( | ||
"Transformer", model_weights_path, max_batch_size); | ||
|
||
void* d_input; | ||
CHECK_GPU_ERROR( | ||
cudaMalloc(&d_input, sizeof(int) * batch_size * batch_seq_len)); | ||
CHECK_GPU_ERROR(cudaMemcpy(d_input, host_input.data(), | ||
sizeof(int) * batch_size * batch_seq_len, | ||
cudaMemcpyHostToDevice)); | ||
|
||
// model->benchmark_mode(true); | ||
model->set_input_ptr(0, d_input); | ||
model->set_input_shape(0, {batch_size, batch_seq_len}); | ||
|
||
for (int i = 0; i < model->get_output_size(); i++) { | ||
void* d_output; | ||
std::vector<int> shape = model->get_output_max_shape(i); | ||
int total_size = 1; | ||
for (int j = 0; j < shape.size(); j++) { | ||
total_size *= shape[j]; | ||
} | ||
CHECK_GPU_ERROR(cudaMalloc(&d_output, total_size * sizeof(int))); | ||
model->set_output_ptr(i, d_output); | ||
} | ||
CHECK_GPU_ERROR(cudaStreamSynchronize(0)); | ||
std::cout << "infer preprocessing finished" << std::endl; | ||
|
||
std::chrono::duration<double> elapsed; | ||
int iter = 0; | ||
/* ---step5. infer and log--- */ | ||
for (int i = 0; i < 1; i++) { | ||
auto start = std::chrono::high_resolution_clock::now(); | ||
model->Infer(); | ||
auto finish = std::chrono::high_resolution_clock::now(); | ||
if (i >= 5) { | ||
iter++; | ||
elapsed += finish - start; | ||
} | ||
} | ||
|
||
std::cout << "lightseq inference latency: " << elapsed.count() * 1000 / iter | ||
<< " ms" << std::endl; | ||
|
||
for (int i = 0; i < model->get_output_size(); i++) { | ||
const void* d_output; | ||
d_output = static_cast<const float*>(model->get_output_ptr(i)); | ||
std::vector<int> shape = model->get_output_shape(i); | ||
std::cout << "output shape: "; | ||
int size = 1; | ||
for (int j = 0; j < shape.size(); j++) { | ||
std::cout << shape[j] << " "; | ||
size *= shape[j]; | ||
} | ||
std::cout << std::endl; | ||
|
||
if (!i) | ||
lightseq::print_vec((int*)d_output, "output", size); | ||
else | ||
lightseq::print_vec((float*)d_output, "output", size); | ||
} | ||
|
||
return 0; | ||
} |
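
Assuming the example builds to a binary named transformer_example (the actual target name comes from the surrounding CMake files, which are not shown here), a hypothetical run would look like ./transformer_example <model_weights_path> 8 32, benchmarking batch size 8 at sequence length 32; with no extra arguments it falls back to a single copy of the built-in example sentence.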
Kernels CMakeLists.txt (new file):
@@ -0,0 +1,7 @@
cmake_minimum_required(VERSION 3.18 FATAL_ERROR)

set(lightseq_kernel_files gemm.cc utils.cc)

add_library(lightseq_kernels STATIC ${lightseq_kernel_files})
target_include_directories(lightseq_kernels INTERFACE includes)
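
Because the include directories are declared INTERFACE, they apply to targets that link against the library rather than to the library itself; a hypothetical consumer target would inherit them via target_link_libraries(my_tool PRIVATE lightseq_kernels), where my_tool stands for any target in the same build.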
ARM kernels placeholder (new file):
@@ -0,0 +1,5 @@
#include "kernel_headers.h" | ||
|
||
namespace lightseq { | ||
namespace arm {} // namespace arm | ||
} // namespace lightseq |
Kernel headers (new file):
@@ -0,0 +1,13 @@
#pragma once

#include <math_constants.h>
#include <type_traits>
#include <chrono>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include <stdexcept>
#include <functional>

#include "utils.h"
print_vec declaration (new file):
@@ -0,0 +1,9 @@
#include "cstdio" | ||
#include "iostream" | ||
|
||
namespace lightseq { | ||
|
||
template <typename T> | ||
void print_vec(const T *outv, std::string outn, int num_output_ele); | ||
|
||
} |
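
For context, a minimal sketch of what a definition of this template might look like, assuming the data is already in host memory (the real implementation in the repository may differ, e.g. by copying from device memory first on CUDA builds):

template <typename T>
void print_vec(const T *outv, std::string outn, int num_output_ele) {
  // Print the label, then the first num_output_ele elements on one line.
  std::cout << outn << ": ";
  for (int i = 0; i < num_output_ele; ++i) {
    std::cout << outv[i] << " ";
  }
  std::cout << std::endl;
}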