    static constexpr SSERegister broadcast (const T&) { return {}; }
};

//==============================================================================
/** SSERegister specialisation for four packed 32 bit float values. */
template <>
struct SSERegister<float>
{
    static constexpr size_t numElements = 4;

    using NativeType = __m128;

    NativeType value;
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const float* d) { return { _mm_loadu_ps (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const float* d) { return { _mm_load_ps (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (float x) { return { _mm_load1_ps (&x) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (float* d) const { _mm_storeu_ps (d, value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (float* d) const { _mm_store_ps (d, value); }
    // Computes a & ~b. _mm_andnot_ps negates its first operand, hence the swapped argument order.
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAndNot (SSERegister a, SSERegister b) { return { _mm_andnot_ps (b.value, a.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister floor (SSERegister x) { return { _mm_floor_ps (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister ceil (SSERegister x) { return { _mm_ceil_ps (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister mul (SSERegister a, SSERegister b) { return { _mm_mul_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister div (SSERegister a, SSERegister b) { return { _mm_div_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<int32_t> convertToInt (SSERegister x);
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<int32_t> reinterpretAsInt (SSERegister x);
};
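
//==============================================================================
// Usage sketch (illustration only, not part of the original header): scaling a
// float buffer by a constant with the wrappers above. The helper name and the
// assumption that n is a multiple of numElements are hypothetical.
//
//     VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1")
//     void scaleBuffer (float* data, size_t n, float gain)
//     {
//         const auto g = SSERegister<float>::broadcast (gain);
//
//         for (size_t i = 0; i < n; i += SSERegister<float>::numElements)
//         {
//             auto r = SSERegister<float>::loadUnaligned (data + i);
//             SSERegister<float>::mul (r, g).storeUnaligned (data + i);
//         }
//     }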

//==============================================================================
/** SSERegister specialisation for two packed 64 bit double values. */
template <>
struct SSERegister<double>
{
    static constexpr size_t numElements = 2;

    using NativeType = __m128d;

    NativeType value;
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const double* d) { return { _mm_loadu_pd (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const double* d) { return { _mm_load_pd (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (double x) { return { _mm_load1_pd (&x) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (double* d) const { _mm_storeu_pd (d, value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (double* d) const { _mm_store_pd (d, value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAndNot (SSERegister a, SSERegister b) { return { _mm_andnot_pd (b.value, a.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister floor (SSERegister x) { return { _mm_floor_pd (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister ceil (SSERegister x) { return { _mm_ceil_pd (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister mul (SSERegister a, SSERegister b) { return { _mm_mul_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister div (SSERegister a, SSERegister b) { return { _mm_div_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("avx512vl") VCTR_TARGET ("avx512dq") static SSERegister<int64_t> convertToInt (SSERegister x);
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<int64_t> reinterpretAsInt (SSERegister x);
};
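
//==============================================================================
// Note (added commentary, not part of the original header): packed float ->
// int32 conversion (_mm_cvtps_epi32) has been available since SSE2, but packed
// double -> int64 conversion (_mm_cvtpd_epi64) only arrived with AVX-512DQ +
// AVX-512VL. That is why convertToInt above carries the stricter avx512 targets
// while its float counterpart only needs sse4.1.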

//==============================================================================
/** SSERegister specialisation for four packed signed 32 bit integers. */
template <>
struct SSERegister<int32_t>
{
    static constexpr size_t numElements = 4;

    using NativeType = __m128i;

    NativeType value;
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const int32_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const int32_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (int32_t x) { return { _mm_set1_epi32 (x) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (int32_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (int32_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAnd (SSERegister a, SSERegister b) { return { _mm_castps_si128 (_mm_and_ps (_mm_castsi128_ps (a.value), _mm_castsi128_ps (b.value))) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseOr (SSERegister a, SSERegister b) { return { _mm_castps_si128 (_mm_or_ps (_mm_castsi128_ps (a.value), _mm_castsi128_ps (b.value))) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister abs (SSERegister x) { return { _mm_abs_epi32 (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<float> convertToFp (SSERegister x) { return { _mm_cvtepi32_ps (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<float> reinterpretAsFp (SSERegister x) { return { _mm_castsi128_ps (x.value) }; }
};
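
//==============================================================================
// Sketch (illustration only, not part of the original header): convertToFp
// performs a numeric int -> float conversion, while reinterpretAsFp keeps the
// raw bits. For a register with every int32 lane holding 1:
//
//     auto i   = SSERegister<int32_t>::broadcast (1);
//     auto fp  = SSERegister<int32_t>::convertToFp (i);     // each lane == 1.0f
//     auto bit = SSERegister<int32_t>::reinterpretAsFp (i); // each lane == 1.4e-45f
//                                                           // (the denormal with bit pattern 0x00000001)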

//==============================================================================
/** SSERegister specialisation for four packed unsigned 32 bit integers. */
template <>
struct SSERegister<uint32_t>
{
    static constexpr size_t numElements = 4;

    using NativeType = __m128i;

    NativeType value;
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const uint32_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const uint32_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (uint32_t x) { return { _mm_set1_epi32 ((int32_t) x) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (uint32_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (uint32_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_epu32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_epu32 (a.value, b.value) }; }
};
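
//==============================================================================
// Note (added commentary, not part of the original header): unsigned add and
// sub can reuse the signed epi32 intrinsics because two's complement
// wraparound produces identical bit patterns, whereas max and min are
// order-dependent and therefore need the dedicated unsigned epu32 variants.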

//==============================================================================
/** SSERegister specialisation for two packed signed 64 bit integers. */
template <>
struct SSERegister<int64_t>
{
    static constexpr size_t numElements = 2;

    using NativeType = __m128i;

    NativeType value;
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const int64_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const int64_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (int64_t x) { return { _mm_set1_epi64x (x) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (int64_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (int64_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAnd (SSERegister a, SSERegister b) { return { _mm_castpd_si128 (_mm_and_pd (_mm_castsi128_pd (a.value), _mm_castsi128_pd (b.value))) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseOr (SSERegister a, SSERegister b) { return { _mm_castpd_si128 (_mm_or_pd (_mm_castsi128_pd (a.value), _mm_castsi128_pd (b.value))) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi64 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi64 (a.value, b.value) }; }
    // _mm_cvtepi64_pd requires AVX-512DQ + AVX-512VL, hence the stricter targets.
    VCTR_FORCEDINLINE VCTR_TARGET ("avx512vl") VCTR_TARGET ("avx512dq") static SSERegister<double> convertToFp (SSERegister x) { return { _mm_cvtepi64_pd (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<double> reinterpretAsFp (SSERegister x) { return { _mm_castsi128_pd (x.value) }; }
};

//==============================================================================
/** SSERegister specialisation for two packed unsigned 64 bit integers. */
template <>
struct SSERegister<uint64_t>
{
    static constexpr size_t numElements = 2;

    using NativeType = __m128i;

    NativeType value;
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const uint64_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const uint64_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (uint64_t x) { return { _mm_set1_epi64x ((int64_t) x) }; }

    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (uint64_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (uint64_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi64 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi64 (a.value, b.value) }; }
};
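
//==============================================================================
// Note (added commentary, not part of the original header): the float and
// double conversion helpers are only declared inside their specialisations and
// defined below, after SSERegister<int32_t> and SSERegister<int64_t> are
// complete, since their return types would otherwise be incomplete at the
// point of declaration.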

inline SSERegister<int32_t> SSERegister<float>::convertToInt (SSERegister x) { return { _mm_cvtps_epi32 (x.value) }; }
inline SSERegister<int32_t> SSERegister<float>::reinterpretAsInt (SSERegister x) { return { _mm_castps_si128 (x.value) }; }
inline SSERegister<int64_t> SSERegister<double>::convertToInt (SSERegister x) { return { _mm_cvtpd_epi64 (x.value) }; }
inline SSERegister<int64_t> SSERegister<double>::reinterpretAsInt (SSERegister x) { return { _mm_castpd_si128 (x.value) }; }
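
//==============================================================================
// Usage sketch (illustration only, not part of the original header): rounding
// four floats towards negative infinity and storing them as int32. The
// fixed-size buffers are hypothetical.
//
//     float in[4] { 1.7f, -2.3f, 0.5f, 8.0f };
//     int32_t out[4];
//
//     auto r = SSERegister<float>::loadUnaligned (in);
//     SSERegister<float>::convertToInt (SSERegister<float>::floor (r)).storeUnaligned (out);
//     // out == { 1, -3, 0, 8 }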