SSERegister.h
/*
  ==============================================================================
  DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.

  Copyright 2022- by sonible GmbH.

  This file is part of VCTR - Versatile Container Templates Reconceptualized.

  VCTR is free software: you can redistribute it and/or modify
  it under the terms of the GNU Lesser General Public License version 3
  only, as published by the Free Software Foundation.

  VCTR is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU Lesser General Public License version 3 for more details.

  You should have received a copy of the GNU Lesser General Public License
  version 3 along with VCTR. If not, see <https://www.gnu.org/licenses/>.
  ==============================================================================
*/

namespace vctr
{

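/** Fallback primary template.

    The real SSE implementations are the x64 specialisations below; on builds where
    VCTR_X64 is not defined only this stub remains, so generic code that refers to
    SSERegister<T> still compiles. broadcast simply returns an empty register.
*/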
template <class T>
struct SSERegister
{
    static constexpr SSERegister broadcast (const T&) { return {}; }
};

#if VCTR_X64

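// Note: VCTR_FORCEDINLINE and VCTR_TARGET are macros defined elsewhere in VCTR. VCTR_TARGET
// presumably wraps a per-function target attribute (e.g. __attribute__ ((target ("sse4.1")))
// on GCC/Clang), which is why every member below states the instruction set it relies on.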
template <>
struct SSERegister<float>
{
    static constexpr size_t numElements = 4;

    using NativeType = __m128;
    __m128 value;

    //==============================================================================
    // Loading
    // clang-format off
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const float* d) { return { _mm_loadu_ps (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const float* d) { return { _mm_load_ps (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (float x) { return { _mm_load1_ps (&x) }; }

    //==============================================================================
    // Storing
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (float* d) const { _mm_storeu_ps (d, value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (float* d) const { _mm_store_ps (d, value); }

    //==============================================================================
    // Bit Operations
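    // _mm_andnot_ps negates its first operand, so the arguments are swapped here to compute a & ~b.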
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAndNot (SSERegister a, SSERegister b) { return { _mm_andnot_ps (b.value, a.value) }; }

    //==============================================================================
    // Math
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister floor (SSERegister x) { return { _mm_floor_ps (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister ceil (SSERegister x) { return { _mm_ceil_ps (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister mul (SSERegister a, SSERegister b) { return { _mm_mul_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister div (SSERegister a, SSERegister b) { return { _mm_div_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_ps (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_ps (a.value, b.value) }; }

    //==============================================================================
    // Type conversion
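    // Declared here, defined out of line at the end of the file, after SSERegister<int32_t> has been defined.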
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<int32_t> convertToInt (SSERegister x);
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<int32_t> reinterpretAsInt (SSERegister x);
    // clang-format on
};

template <>
struct SSERegister<double>
{
    static constexpr size_t numElements = 2;

    using NativeType = __m128d;
    __m128d value;

    //==============================================================================
    // Loading
    // clang-format off
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const double* d) { return { _mm_loadu_pd (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const double* d) { return { _mm_load_pd (d) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (double x) { return { _mm_load1_pd (&x) }; }

    //==============================================================================
    // Storing
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (double* d) const { _mm_storeu_pd (d, value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (double* d) const { _mm_store_pd (d, value); }

    //==============================================================================
    // Bit Operations
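    // _mm_andnot_pd negates its first operand, so the arguments are swapped here to compute a & ~b.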
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAndNot (SSERegister a, SSERegister b) { return { _mm_andnot_pd (b.value, a.value) }; }

    //==============================================================================
    // Math
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister floor (SSERegister x) { return { _mm_floor_pd (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister ceil (SSERegister x) { return { _mm_ceil_pd (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister mul (SSERegister a, SSERegister b) { return { _mm_mul_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister div (SSERegister a, SSERegister b) { return { _mm_div_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_pd (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_pd (a.value, b.value) }; }

    //==============================================================================
    // Type conversion
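    // Declared here, defined out of line at the end of the file. convertToInt needs the AVX-512
    // targets below because _mm_cvtpd_epi64 is an AVX512DQ/AVX512VL instruction.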
    VCTR_FORCEDINLINE VCTR_TARGET ("avx512vl") VCTR_TARGET ("avx512dq") static SSERegister<int64_t> convertToInt (SSERegister x);
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<int64_t> reinterpretAsInt (SSERegister x);
    // clang-format on
};

template <>
struct SSERegister<int32_t>
{
    static constexpr size_t numElements = 4;

    using NativeType = __m128i;
    __m128i value;

    //==============================================================================
    // Loading
    // clang-format off
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const int32_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const int32_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (int32_t x) { return { _mm_set1_epi32 (x) }; }

    //==============================================================================
    // Storing
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (int32_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (int32_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }

    //==============================================================================
    // Bit Operations
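    // The bitwise and/or are routed through the float domain; the casts only reinterpret the
    // bit pattern and generate no instructions, so the result equals _mm_and_si128 / _mm_or_si128.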
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAnd (SSERegister a, SSERegister b) { return { _mm_castps_si128 (_mm_and_ps (_mm_castsi128_ps (a.value), _mm_castsi128_ps (b.value))) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseOr (SSERegister a, SSERegister b) { return { _mm_castps_si128 (_mm_or_ps (_mm_castsi128_ps (a.value), _mm_castsi128_ps (b.value))) }; }

    //==============================================================================
    // Math
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister abs (SSERegister x) { return { _mm_abs_epi32 (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_epi32 (a.value, b.value) }; }

    //==============================================================================
    // Type conversion
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<float> convertToFp (SSERegister x) { return { _mm_cvtepi32_ps (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<float> reinterpretAsFp (SSERegister x) { return { _mm_castsi128_ps (x.value) }; }
    // clang-format on
};

template <>
struct SSERegister<uint32_t>
{
    static constexpr size_t numElements = 4;

    using NativeType = __m128i;
    __m128i value;

    //==============================================================================
    // Loading
    // clang-format off
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const uint32_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const uint32_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (uint32_t x) { return { _mm_set1_epi32 ((int32_t) x) }; }

    //==============================================================================
    // Storing
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (uint32_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (uint32_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }

    //==============================================================================
    // Bit Operations

    //==============================================================================
    // Math
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister max (SSERegister a, SSERegister b) { return { _mm_max_epu32 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister min (SSERegister a, SSERegister b) { return { _mm_min_epu32 (a.value, b.value) }; }
    // clang-format on
};

template <>
struct SSERegister<int64_t>
{
    static constexpr size_t numElements = 2;

    using NativeType = __m128i;
    __m128i value;

    //==============================================================================
    // Loading
    // clang-format off
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const int64_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const int64_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (int64_t x) { return { _mm_set1_epi64x (x) }; }

    //==============================================================================
    // Storing
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (int64_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (int64_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }

    //==============================================================================
    // Bit Operations
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseAnd (SSERegister a, SSERegister b) { return { _mm_castpd_si128 (_mm_and_pd (_mm_castsi128_pd (a.value), _mm_castsi128_pd (b.value))) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister bitwiseOr (SSERegister a, SSERegister b) { return { _mm_castpd_si128 (_mm_or_pd (_mm_castsi128_pd (a.value), _mm_castsi128_pd (b.value))) }; }

    //==============================================================================
    // Math
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi64 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi64 (a.value, b.value) }; }

    //==============================================================================
    // Type conversion
    // Note: _mm_cvtepi64_pd is an AVX512DQ/AVX512VL instruction, so this conversion needs the AVX-512 targets rather than plain SSE4.1.
    VCTR_FORCEDINLINE VCTR_TARGET ("avx512vl") VCTR_TARGET ("avx512dq") static SSERegister<double> convertToFp (SSERegister x) { return { _mm_cvtepi64_pd (x.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister<double> reinterpretAsFp (SSERegister x) { return { _mm_castsi128_pd (x.value) }; }
    // clang-format on
};

template <>
struct SSERegister<uint64_t>
{
    static constexpr size_t numElements = 2;

    using NativeType = __m128i;
    __m128i value;

    //==============================================================================
    // Loading
    // clang-format off
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadUnaligned (const uint64_t* d) { return { _mm_loadu_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister loadAligned (const uint64_t* d) { return { _mm_load_si128 (reinterpret_cast<const __m128i*> (d)) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister broadcast (uint64_t x) { return { _mm_set1_epi64x ((int64_t) x) }; }

    //==============================================================================
    // Storing
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeUnaligned (uint64_t* d) const { _mm_storeu_si128 (reinterpret_cast<__m128i*> (d), value); }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") void storeAligned (uint64_t* d) const { _mm_store_si128 (reinterpret_cast<__m128i*> (d), value); }

    //==============================================================================
    // Bit Operations

    //==============================================================================
    // Math
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister add (SSERegister a, SSERegister b) { return { _mm_add_epi64 (a.value, b.value) }; }
    VCTR_FORCEDINLINE VCTR_TARGET ("sse4.1") static SSERegister sub (SSERegister a, SSERegister b) { return { _mm_sub_epi64 (a.value, b.value) }; }
    // clang-format on
};

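// Out-of-line definitions for the conversions declared in the float and double specialisations:
// they can only be written here, after the integer specialisations they return have been defined.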
inline SSERegister<int32_t> SSERegister<float>::convertToInt (SSERegister x) { return { _mm_cvtps_epi32 (x.value) }; }
inline SSERegister<int32_t> SSERegister<float>::reinterpretAsInt (SSERegister x) { return { _mm_castps_si128 (x.value) }; }
inline SSERegister<int64_t> SSERegister<double>::convertToInt (SSERegister x) { return { _mm_cvtpd_epi64 (x.value) }; }
inline SSERegister<int64_t> SSERegister<double>::reinterpretAsInt (SSERegister x) { return { _mm_castpd_si128 (x.value) }; }

#endif

} // namespace vctr
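
A minimal usage sketch, not part of the header: it assumes an x64 build (VCTR_X64 defined), a CPU
supporting SSE4.1, a buffer length that is a multiple of four, and the illustrative helper name
addBuffers.

#include <cstddef>

// Hypothetical example: adds two float buffers element-wise, four floats per iteration,
// using the SSERegister<float> specialisation declared above.
void addBuffers (const float* a, const float* b, float* dst, std::size_t n)
{
    using Reg = vctr::SSERegister<float>;

    for (std::size_t i = 0; i < n; i += Reg::numElements)
    {
        auto ra = Reg::loadUnaligned (a + i);
        auto rb = Reg::loadUnaligned (b + i);
        Reg::add (ra, rb).storeUnaligned (dst + i);
    }
}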