add channels last for MaxPool2d
max_pool2d channels last support forward path

max_pool2d channels last support backward path

vectorize channels last forward path

rename the header file

fix windows build

combine PoolingKernel.h into Pool.h

add data type check

loosen test_max_pool2d_nhwc to cover device CPU

ghstack-source-id: 2f2487b881ab8080b261d07fc87c46fcef59b8a0
Pull Request resolved: pytorch#48917
mingfeima committed Apr 19, 2021
1 parent 2219286 commit 527763c
Showing 10 changed files with 465 additions and 291 deletions.
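For context on what "channels last forward path" means here: in NHWC layout the channel dimension is innermost and contiguous, so the per-output-pixel reduction can run its innermost loop over C and be vectorized with Vec256. Below is a minimal scalar reference sketch of that loop structure, under simplifying assumptions (no padding, dilation, or ceil_mode); the function name nhwc_max_pool2d and its signature are hypothetical, not the kernel added by this commit.

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Hypothetical reference sketch of a channels-last (NHWC) max_pool2d
    // forward pass. The real kernel vectorizes the innermost channel loop
    // with Vec256; it is left scalar here for clarity.
    void nhwc_max_pool2d(const float* input, float* output,
                         int64_t N, int64_t H, int64_t W, int64_t C,
                         int64_t kH, int64_t kW, int64_t sH, int64_t sW) {
      const int64_t OH = (H - kH) / sH + 1;
      const int64_t OW = (W - kW) / sW + 1;
      for (int64_t n = 0; n < N; n++) {
        for (int64_t oh = 0; oh < OH; oh++) {
          for (int64_t ow = 0; ow < OW; ow++) {
            float* out = output + ((n * OH + oh) * OW + ow) * C;
            std::fill(out, out + C, -std::numeric_limits<float>::infinity());
            for (int64_t ih = oh * sH; ih < oh * sH + kH; ih++) {
              for (int64_t iw = ow * sW; iw < ow * sW + kW; iw++) {
                const float* in = input + ((n * H + ih) * W + iw) * C;
                // Innermost loop runs over contiguous channels: this is the
                // loop the commit vectorizes.
                for (int64_t c = 0; c < C; c++) {
                  // Max pooling propagates NaN, which is why the Vec256
                  // isnan() mask added below is needed in the vector path.
                  out[c] = (std::isnan(in[c]) || in[c] > out[c]) ? in[c] : out[c];
                }
              }
            }
          }
        }
      }
    }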
11 changes: 11 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_base.h
@@ -202,6 +202,17 @@ struct Vec256 {
    }
    return mask;
  }
  Vec256<T> isnan() const {
    Vec256<T> vec;
    for (int64_t i = 0; i != size(); i++) {
      if (_isnan(values[i])) {
        std::memset(static_cast<void*>(vec.values + i), 0xFF, sizeof(T));
      } else {
        std::memset(static_cast<void*>(vec.values + i), 0, sizeof(T));
      }
    }
    return vec;
  }
  Vec256<T> map(T (*f)(T)) const {
    Vec256<T> ret;
    for (int64_t i = 0; i != size(); i++) {
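The scalar fallback above encodes the per-lane result as all 1-bits for NaN lanes and all 0-bits otherwise, the usual SIMD mask convention, so it can later drive a branchless select. A small standalone illustration of that convention (plain C++, not ATen code):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      float lanes[4] = {1.0f, std::nanf(""), 3.0f, -0.0f};
      uint32_t mask[4];
      // Same convention as Vec256<T>::isnan(): NaN lanes become all 1-bits,
      // other lanes all 0-bits.
      for (int i = 0; i < 4; i++) {
        std::memset(&mask[i], std::isnan(lanes[i]) ? 0xFF : 0x00, sizeof(uint32_t));
      }
      // An all-ones/all-zeros mask like this can drive a branchless bitwise
      // select, e.g. to propagate NaN when taking a running maximum.
      for (int i = 0; i < 4; i++) {
        std::printf("lane %d: mask = 0x%08X\n", i, (unsigned)mask[i]);
      }
      return 0;
    }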
3 changes: 3 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_double.h
@@ -96,6 +96,9 @@ template <> class Vec256<double> {
    __m256d cmp = _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_EQ_OQ);
    return _mm256_movemask_pd(cmp);
  }
  Vec256<double> isnan() const {
    return _mm256_cmp_pd(values, _mm256_set1_pd(0.0), _CMP_UNORD_Q);
  }
  Vec256<double> map(double (*f)(double)) const {
    __at_align32__ double tmp[size()];
    store(tmp);
3 changes: 3 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_float.h
@@ -103,6 +103,9 @@ template <> class Vec256<float> {
    __m256 cmp = _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_EQ_OQ);
    return _mm256_movemask_ps(cmp);
  }
  Vec256<float> isnan() const {
    return _mm256_cmp_ps(values, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
  }
  Vec256<float> map(float (*f)(float)) const {
    __at_align32__ float tmp[size()];
    store(tmp);
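On AVX the same mask is produced by a single unordered compare: _CMP_UNORD_Q is true when either operand is NaN, and since the second operand is the constant 0.0f, only NaN lanes of `values` come back as all 1-bits. A standalone demo of the intrinsic's behavior (assumes an AVX-capable CPU, compile with e.g. -mavx; not code from this commit):

    #include <immintrin.h>
    #include <cmath>
    #include <cstdio>

    int main() {
      __m256 v = _mm256_setr_ps(1.0f, std::nanf(""), 3.0f, 4.0f,
                                std::nanf(""), 6.0f, 7.0f, 8.0f);
      // Unordered compare against 0.0f: lanes where v is NaN become all 1-bits.
      __m256 nan_mask = _mm256_cmp_ps(v, _mm256_set1_ps(0.0f), _CMP_UNORD_Q);
      // movemask packs one bit per lane (its sign bit), so this prints 0x12:
      // lanes 1 and 4 are NaN.
      std::printf("NaN lanes bitmask: 0x%02X\n",
                  (unsigned)_mm256_movemask_ps(nan_mask));
      // The mask can also drive a blend, e.g. replace NaN lanes with a sentinel.
      __m256 cleaned = _mm256_blendv_ps(v, _mm256_set1_ps(0.0f), nan_mask);
      float out[8];
      _mm256_storeu_ps(out, cleaned);
      std::printf("after blend: %f %f\n", out[0], out[1]);  // 1.000000 0.000000
      return 0;
    }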
13 changes: 13 additions & 0 deletions aten/src/ATen/cpu/vec256/vec256_float_neon.h
@@ -283,6 +283,19 @@ template <> class Vec256<float> {
    }
    return mask;
  }
  Vec256<float> isnan() const {
    __at_align32__ float tmp[size()];
    __at_align32__ float res[size()];
    store(tmp);
    for (int i = 0; i < size(); i++) {
      if (_isnan(tmp[i])) {
        std::memset(static_cast<void*>(&res[i]), 0xFF, sizeof(float));
      } else {
        std::memset(static_cast<void*>(&res[i]), 0, sizeof(float));
      }
    }
    return loadu(res);
  };
  Vec256<float> map(float (*f)(float)) const {
    __at_align32__ float tmp[size()];
    store(tmp);
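The NEON variant falls back to the scalar mask construction from vec256_base.h. Presumably the pooling kernel combines such a mask with a vectorized maximum so that a NaN anywhere in the window propagates to the output; a hedged AVX sketch of that accumulation step is shown below (the helper max_propagate_nan is hypothetical, not the Pool.h implementation).

    #include <immintrin.h>
    #include <cmath>
    #include <cstdio>

    // NaN-propagating vectorized max: if a lane of `v` is NaN, that lane of
    // the accumulator becomes NaN, matching max_pool2d semantics. This
    // mirrors the role the isnan() mask plays in the vectorized pooling loop
    // (an assumption, not the actual kernel code).
    static inline __m256 max_propagate_nan(__m256 acc, __m256 v) {
      __m256 m = _mm256_max_ps(acc, v);                           // plain max
      __m256 nan_mask = _mm256_cmp_ps(v, _mm256_set1_ps(0.0f),
                                      _CMP_UNORD_Q);              // NaN lanes of v
      return _mm256_blendv_ps(m, v, nan_mask);                    // force NaN through
    }

    int main() {
      __m256 acc = _mm256_set1_ps(-INFINITY);
      __m256 v = _mm256_setr_ps(1.0f, std::nanf(""), 3.0f, 4.0f,
                                5.0f, 6.0f, 7.0f, 8.0f);
      acc = max_propagate_nan(acc, v);
      float out[8];
      _mm256_storeu_ps(out, acc);
      std::printf("%f %f %f\n", out[0], out[1], out[2]);  // 1.0, nan, 3.0
      return 0;
    }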
