
Commit

Avoid special casing for MinGW
After some testing, no version of MinGW/gcc has been found where this code is still necessary. It is probably a leftover from older (pre-C++17?) code.

closes #2891

No functional change
dorzechowski authored and vondele committed Aug 9, 2020
1 parent 2bfde55 commit a6e8929
Showing 3 changed files with 14 additions and 98 deletions.
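
For context on what the removed branches did: _mm256_load_si256 and _mm512_load_si512 require their source address to be 32- or 64-byte aligned, while the _mm256_loadu_si256 variants accept any address. The MinGW paths fell back to the unaligned loads because older MSYS2/g++ builds reportedly did not honour alignas on these buffers. The following standalone sketch is illustrative only (not Stockfish code; the buffer name is made up); it assumes AVX2 is enabled at compile time (e.g. g++ -std=c++17 -mavx2) and shows the two load flavours on an alignas(32) buffer:

#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
    // alignas(32) is what makes the aligned load legal; without it,
    // _mm256_load_si256 on this buffer would be undefined behaviour.
    alignas(32) std::int8_t buffer[32];
    for (int i = 0; i < 32; ++i)
        buffer[i] = static_cast<std::int8_t>(i);

    // Aligned load: valid only because the buffer is 32-byte aligned.
    const __m256i a = _mm256_load_si256(reinterpret_cast<const __m256i*>(buffer));

    // Unaligned load: valid for any address, which is why the old MinGW
    // branches fell back to it.
    const __m256i u = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(buffer));

    // Both loads read the same 32 bytes here; verify and report.
    const __m256i eq = _mm256_cmpeq_epi8(a, u);
    std::printf("loads agree: %s\n", _mm256_movemask_epi8(eq) == -1 ? "yes" : "no");
    return 0;
}
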
src/nnue/layers/affine_transform.h: 29 changes (5 additions, 24 deletions)
@@ -104,13 +104,8 @@ namespace Eval::NNUE::Layers {
         __m512i sum = _mm512_setzero_si512();
         const auto row = reinterpret_cast<const __m512i*>(&weights_[offset]);
         for (IndexType j = 0; j < kNumChunks; ++j) {
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          __m512i product = _mm512_maddubs_epi16(_mm512_loadu_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
-#else
-          __m512i product = _mm512_maddubs_epi16(_mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
-#endif
-
+          __m512i product = _mm512_maddubs_epi16(
+              _mm512_load_si512(&input_vector[j]), _mm512_load_si512(&row[j]));
           product = _mm512_madd_epi16(product, kOnes);
           sum = _mm512_add_epi32(sum, product);
         }
@@ -125,12 +120,8 @@ namespace Eval::NNUE::Layers {
         const auto row_256 = reinterpret_cast<const __m256i*>(&weights_[offset]);
         int j = kNumChunks * 2;

-#if defined(__MINGW32__) || defined(__MINGW64__) // See HACK comment below in AVX2.
-        __m256i sum256 = _mm256_maddubs_epi16(_mm256_loadu_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
-#else
-        __m256i sum256 = _mm256_maddubs_epi16(_mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
-#endif
-
+        __m256i sum256 = _mm256_maddubs_epi16(
+            _mm256_load_si256(&iv_256[j]), _mm256_load_si256(&row_256[j]));
         sum256 = _mm256_madd_epi16(sum256, _mm256_set1_epi16(1));
         sum256 = _mm256_hadd_epi32(sum256, sum256);
         sum256 = _mm256_hadd_epi32(sum256, sum256);
@@ -144,17 +135,7 @@
         const auto row = reinterpret_cast<const __m256i*>(&weights_[offset]);
         for (IndexType j = 0; j < kNumChunks; ++j) {
           __m256i product = _mm256_maddubs_epi16(
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-            // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-            // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-            // even though alignas is specified.
-            _mm256_loadu_si256
-#else
-            _mm256_load_si256
-#endif
-
-            (&input_vector[j]), _mm256_load_si256(&row[j]));
+            _mm256_load_si256(&input_vector[j]), _mm256_load_si256(&row[j]));
           product = _mm256_madd_epi16(product, kOnes);
           sum = _mm256_add_epi32(sum, product);
         }
src/nnue/layers/clipped_relu.h: 49 changes (6 additions, 43 deletions)
@@ -74,50 +74,13 @@ namespace Eval::NNUE::Layers {
       const auto out = reinterpret_cast<__m256i*>(output);
       for (IndexType i = 0; i < kNumChunks; ++i) {
         const __m256i words0 = _mm256_srai_epi16(_mm256_packs_epi32(
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-          // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-          // even though alignas is specified.
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-
-          (&in[i * 4 + 0]),
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-
-          (&in[i * 4 + 1])), kWeightScaleBits);
+            _mm256_load_si256(&in[i * 4 + 0]),
+            _mm256_load_si256(&in[i * 4 + 1])), kWeightScaleBits);
         const __m256i words1 = _mm256_srai_epi16(_mm256_packs_epi32(
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-
-          (&in[i * 4 + 2]),
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-
-          (&in[i * 4 + 3])), kWeightScaleBits);
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        _mm256_storeu_si256
-#else
-        _mm256_store_si256
-#endif
-
-        (&out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
+            _mm256_load_si256(&in[i * 4 + 2]),
+            _mm256_load_si256(&in[i * 4 + 3])), kWeightScaleBits);
+        _mm256_store_si256(
+            &out[i], _mm256_permutevar8x32_epi32(_mm256_max_epi8(
             _mm256_packs_epi16(words0, words1), kZero), kOffsets));
       }
       constexpr IndexType kStart = kNumChunks * kSimdWidth;
src/nnue/nnue_feature_transformer.h: 34 changes (3 additions, 31 deletions)
@@ -110,36 +110,12 @@ namespace Eval::NNUE {
       auto out = reinterpret_cast<__m256i*>(&output[offset]);
       for (IndexType j = 0; j < kNumChunks; ++j) {
         __m256i sum0 =
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          // HACK: Use _mm256_loadu_si256() instead of _mm256_load_si256. Because the binary
-          // compiled with g++ in MSYS2 crashes here because the output memory is not aligned
-          // even though alignas is specified.
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-
-          (&reinterpret_cast<const __m256i*>(
+          _mm256_load_si256(&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 0]);
         __m256i sum1 =
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-          _mm256_loadu_si256
-#else
-          _mm256_load_si256
-#endif
-
-          (&reinterpret_cast<const __m256i*>(
+          _mm256_load_si256(&reinterpret_cast<const __m256i*>(
             accumulation[perspectives[p]][0])[j * 2 + 1]);
-
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        _mm256_storeu_si256
-#else
-        _mm256_store_si256
-#endif
-
-        (&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
+        _mm256_store_si256(&out[j], _mm256_permute4x64_epi64(_mm256_max_epi8(
           _mm256_packs_epi16(sum0, sum1), kZero), kControl));
       }

@@ -202,11 +178,7 @@ namespace Eval::NNUE {
       auto column = reinterpret_cast<const __m256i*>(&weights_[offset]);
       constexpr IndexType kNumChunks = kHalfDimensions / (kSimdWidth / 2);
       for (IndexType j = 0; j < kNumChunks; ++j) {
-#if defined(__MINGW32__) || defined(__MINGW64__)
-        _mm256_storeu_si256(&accumulation[j], _mm256_add_epi16(_mm256_loadu_si256(&accumulation[j]), column[j]));
-#else
         accumulation[j] = _mm256_add_epi16(accumulation[j], column[j]);
-#endif
       }

 #elif defined(USE_SSE2)
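
The commit message's "after some testing" can be spot-checked on a given MinGW toolchain with a small alignment probe. The sketch below is illustrative and not part of this commit; it assumes a C++17 compiler (where over-aligned types get aligned operator new), and the struct and field names are made up, only loosely mirroring the accumulation buffers. It verifies that an alignas(64) member really lands on a 64-byte boundary, which is the property the aligned loads and stores above rely on:

#include <cstdint>
#include <cstdio>
#include <memory>

// Illustrative stand-in for an NNUE-style buffer: a 64-byte aligned member.
struct AlignedBuffer {
    alignas(64) std::int16_t accumulation[256];
};

static bool is_aligned(const void* p, std::size_t alignment) {
    return reinterpret_cast<std::uintptr_t>(p) % alignment == 0;
}

int main() {
    AlignedBuffer on_stack;                            // automatic storage
    auto on_heap = std::make_unique<AlignedBuffer>();  // C++17 aligned new

    std::printf("stack buffer 64-byte aligned: %s\n",
                is_aligned(on_stack.accumulation, 64) ? "yes" : "no");
    std::printf("heap buffer 64-byte aligned:  %s\n",
                is_aligned(on_heap->accumulation, 64) ? "yes" : "no");
    return 0;
}

If both lines print "yes" on a given MinGW build, the unaligned-load fallback removed here is unnecessary there, which matches the commit's conclusion.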
