@@ -2103,6 +2103,19 @@ namespace xsimd
             return { reduce_add(self.real()), reduce_add(self.imag()) };
         }

+        template <class A, class T, class /*=typename std::enable_if<std::is_scalar<T>::value, void>::type*/>
+        XSIMD_INLINE T reduce_add(batch<T, A> const& self, requires_arch<common>) noexcept
+        {
+            alignas(A::alignment()) T buffer[batch<T, A>::size];
+            self.store_aligned(buffer);
+            T res = 0;
+            for (T val : buffer)
+            {
+                res += val;
+            }
+            return res;
+        }
+
         namespace detail
         {
             template <class T, T N>
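For reference, the scalar overload added above is the architecture-agnostic fallback: it spills the batch into a stack buffer aligned for the target architecture, then folds the lanes with a plain loop. A minimal usage sketch (assuming the public xsimd::reduce_add entry point, which dispatches to a kernel like this on architectures without a specialized horizontal add):

    #include <xsimd/xsimd.hpp>

    int main()
    {
        xsimd::batch<int> v(2);         // broadcast 2 to every lane
        int sum = xsimd::reduce_add(v); // 2 * number of lanes
        return sum == 2 * static_cast<int>(xsimd::batch<int>::size) ? 0 : 1;
    }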
@@ -2147,6 +2160,34 @@ namespace xsimd
                 self, std::integral_constant<unsigned, batch<T, A>::size>());
         }

+        // reduce_mul
+        template <class A, class T>
+        XSIMD_INLINE std::complex<T> reduce_mul(batch<std::complex<T>, A> const& self, requires_arch<common>) noexcept
+        {
+            // FIXME: could do better
+            alignas(A::alignment()) std::complex<T> buffer[batch<std::complex<T>, A>::size];
+            self.store_aligned(buffer);
+            std::complex<T> res = 1;
+            for (auto val : buffer)
+            {
+                res *= val;
+            }
+            return res;
+        }
+
+        template <class A, class T, class /*=typename std::enable_if<std::is_scalar<T>::value, void>::type*/>
+        XSIMD_INLINE T reduce_mul(batch<T, A> const& self, requires_arch<common>) noexcept
+        {
+            alignas(A::alignment()) T buffer[batch<T, A>::size];
+            self.store_aligned(buffer);
+            T res = 1;
+            for (T val : buffer)
+            {
+                res *= val;
+            }
+            return res;
+        }
+
         // remainder
         template <class A>
         XSIMD_INLINE batch<float, A> remainder(batch<float, A> const& self, batch<float, A> const& other, requires_arch<common>) noexcept
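The reduce_mul overloads follow the same spill-and-fold pattern, seeded with the multiplicative identity 1 instead of 0; the std::complex overload reduces whole complex lanes, at the cost flagged by the FIXME. A hedged usage sketch (assuming a matching public xsimd::reduce_mul entry point is exposed alongside these kernels):

    #include <complex>
    #include <xsimd/xsimd.hpp>

    int main()
    {
        xsimd::batch<double> v(2.0);        // broadcast 2.0 to every lane
        double prod = xsimd::reduce_mul(v); // 2 raised to the lane count

        xsimd::batch<std::complex<double>> c(std::complex<double>(1.0, 0.0));
        std::complex<double> cprod = xsimd::reduce_mul(c); // (1, 0): product of identity lanes
        return (prod > 0.0 && cprod.real() == 1.0) ? 0 : 1;
    }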