#ifndef INCLUDED_volk_32fc_x2_square_dist_32f_a_H
#define INCLUDED_volk_32fc_x2_square_dist_32f_a_H

#include <inttypes.h>
#include <volk/volk_complex.h>
#ifdef LV_HAVE_AVX2
#include <immintrin.h>
static inline void volk_32fc_x2_square_dist_32f_a_avx2(float* target,
                                                       lv_32fc_t* src0,
                                                       lv_32fc_t* points,
                                                       unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __m128 xmm0, xmm9, xmm10;
    __m256 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    lv_32fc_t diff;
    float sq_dist;
    int bound = num_bytes >> 6;            /* full 8-point (64-byte) iterations */
    int leftovers0 = (num_bytes >> 5) & 1; /* one 4-point chunk left? */
    int leftovers1 = (num_bytes >> 4) & 1; /* one 2-point chunk left? */
    int leftovers2 = (num_bytes >> 3) & 1; /* one scalar point left? */
    int i = 0;
    __m256i idx = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    xmm1 = _mm256_setzero_ps();
    xmm0 = _mm_load_ps((float*)src0);
    xmm0 = _mm_permute_ps(xmm0, 0b01000100); /* (re, im, re, im) of src0[0] */
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 0);
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 1);
    for (; i < bound; ++i) {
        xmm2 = _mm256_load_ps((float*)&points[0]);
        xmm3 = _mm256_load_ps((float*)&points[4]);
        points += 8;
        xmm4 = _mm256_sub_ps(xmm1, xmm2);
        xmm5 = _mm256_sub_ps(xmm1, xmm3);
        xmm6 = _mm256_mul_ps(xmm4, xmm4);
        xmm7 = _mm256_mul_ps(xmm5, xmm5);
        xmm4 = _mm256_hadd_ps(xmm6, xmm7);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        _mm256_store_ps(target, xmm4);

        target += 8;
    }
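    /* Why the permutevar8x32 above: _mm256_hadd_ps adds pairs within each
     * 128-bit lane, so the eight squared distances come out lane-interleaved
     * as (d0 d1 d4 d5 | d2 d3 d6 d7), low to high. Reading idx low-to-high,
     * _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0) selects elements
     * 0,1,4,5,2,3,6,7 and restores memory order d0..d7 before the store. */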
    for (i = 0; i < leftovers0; ++i) {
        xmm2 = _mm256_load_ps((float*)&points[0]);

        xmm4 = _mm256_sub_ps(xmm1, xmm2);

        points += 4;
        xmm6 = _mm256_mul_ps(xmm4, xmm4);

        xmm4 = _mm256_hadd_ps(xmm6, xmm6);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        xmm9 = _mm256_extractf128_ps(xmm4, 1);
        _mm_store_ps(target, xmm9);

        target += 4;
    }
    for (i = 0; i < leftovers1; ++i) {
        xmm9 = _mm_load_ps((float*)&points[0]);

        xmm10 = _mm_sub_ps(_mm256_extractf128_ps(xmm1, 0), xmm9);

        points += 2;

        xmm9 = _mm_mul_ps(xmm10, xmm10);

        xmm10 = _mm_hadd_ps(xmm9, xmm9);

        _mm_storeh_pi((__m64*)target, xmm10);

        target += 2;
    }
    for (i = 0; i < leftovers2; ++i) {
        diff = src0[0] - points[0];

        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);

        target[0] = sq_dist;
    }
}

#endif /*LV_HAVE_AVX2*/
#ifdef LV_HAVE_SSE3
#include <pmmintrin.h>
#include <xmmintrin.h>
static inline void volk_32fc_x2_square_dist_32f_a_sse3(float* target,
                                                       lv_32fc_t* src0,
                                                       lv_32fc_t* points,
                                                       unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __m128 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    lv_32fc_t diff;
    float sq_dist;
    int bound = num_bytes >> 5; /* 4-point (32-byte) iterations */
    int i = 0;

    /* splat src0[0] into both halves of xmm1 and pre-load the first chunk */
    xmm1 = _mm_setzero_ps();
    xmm1 = _mm_loadl_pi(xmm1, (__m64*)src0);
    xmm2 = _mm_load_ps((float*)&points[0]);
    xmm1 = _mm_movelh_ps(xmm1, xmm1);
    xmm3 = _mm_load_ps((float*)&points[2]);
    /* software-pipelined: the loads for the next iteration overlap the math,
       so the loop runs to bound - 1 and the last chunk is peeled off below */
    for (; i < bound - 1; ++i) {
        xmm4 = _mm_sub_ps(xmm1, xmm2);
        xmm5 = _mm_sub_ps(xmm1, xmm3);
        points += 4;
        xmm6 = _mm_mul_ps(xmm4, xmm4);
        xmm7 = _mm_mul_ps(xmm5, xmm5);

        xmm2 = _mm_load_ps((float*)&points[0]);

        xmm4 = _mm_hadd_ps(xmm6, xmm7);

        xmm3 = _mm_load_ps((float*)&points[2]);

        _mm_store_ps(target, xmm4);

        target += 4;
    }

    /* peeled final iteration, with no loads past the end of the buffer */
    xmm4 = _mm_sub_ps(xmm1, xmm2);
    xmm5 = _mm_sub_ps(xmm1, xmm3);

    points += 4;
    xmm6 = _mm_mul_ps(xmm4, xmm4);
    xmm7 = _mm_mul_ps(xmm5, xmm5);

    xmm4 = _mm_hadd_ps(xmm6, xmm7);

    _mm_store_ps(target, xmm4);

    target += 4;
    if (num_bytes >> 4 & 1) {
        xmm2 = _mm_load_ps((float*)&points[0]);

        xmm4 = _mm_sub_ps(xmm1, xmm2);

        points += 2;

        xmm6 = _mm_mul_ps(xmm4, xmm4);

        xmm4 = _mm_hadd_ps(xmm6, xmm6);

        /* hadd duplicates the two sums into the high half; store elems 2,3 */
        _mm_storeh_pi((__m64*)target, xmm4);

        target += 2;
    }
    if (num_bytes >> 3 & 1) {
        diff = src0[0] - points[0];

        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);

        target[0] = sq_dist;
    }
}

#endif /*LV_HAVE_SSE3*/
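/* For cross-checking: one 4-point chunk of the SSE3 kernel computes the same
 * values as this scalar sketch (hypothetical helper, not part of VOLK):
 *
 *   static inline void square_dist_chunk4(float* t, lv_32fc_t s,
 *                                         const lv_32fc_t* p)
 *   {
 *       for (int k = 0; k < 4; ++k) {
 *           lv_32fc_t d = s - p[k];
 *           t[k] = lv_creal(d) * lv_creal(d) + lv_cimag(d) * lv_cimag(d);
 *       }
 *   }
 */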
#ifdef LV_HAVE_NEON
#include <arm_neon.h>

static inline void volk_32fc_x2_square_dist_32f_neon(float* target,
                                                     lv_32fc_t* src0,
                                                     lv_32fc_t* points,
                                                     unsigned int num_points)
{
    const unsigned int quarter_points = num_points / 4;
    unsigned int number;
    float32x4x2_t a_vec, b_vec;
    float32x4x2_t diff_vec;
    float32x4_t tmp, tmp1, dist_sq;
    a_vec.val[0] = vdupq_n_f32(lv_creal(src0[0]));
    a_vec.val[1] = vdupq_n_f32(lv_cimag(src0[0]));
    for (number = 0; number < quarter_points; ++number) {
        /* vld2q_f32 de-interleaves: val[0] = four reals, val[1] = four imags */
        b_vec = vld2q_f32((float*)points);
        diff_vec.val[0] = vsubq_f32(a_vec.val[0], b_vec.val[0]);
        diff_vec.val[1] = vsubq_f32(a_vec.val[1], b_vec.val[1]);
        tmp = vmulq_f32(diff_vec.val[0], diff_vec.val[0]);
        tmp1 = vmulq_f32(diff_vec.val[1], diff_vec.val[1]);

        dist_sq = vaddq_f32(tmp, tmp1);
        vst1q_f32(target, dist_sq);
        points += 4;
        target += 4;
    }
    for (number = quarter_points * 4; number < num_points; ++number) {
        lv_32fc_t diff = src0[0] - *points++;
        *target++ = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);
    }
}
#endif /*LV_HAVE_NEON*/
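/* A possible micro-optimization on NEON targets with multiply-accumulate
 * (a sketch, not what this kernel does): fold the second multiply and the
 * add into one vmlaq_f32, which computes a + b * c per lane:
 *
 *   tmp = vmulq_f32(diff_vec.val[0], diff_vec.val[0]);
 *   dist_sq = vmlaq_f32(tmp, diff_vec.val[1], diff_vec.val[1]);
 */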
#ifdef LV_HAVE_GENERIC
static inline void volk_32fc_x2_square_dist_32f_generic(float* target,
                                                        lv_32fc_t* src0,
                                                        lv_32fc_t* points,
                                                        unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    lv_32fc_t diff;
    float sq_dist;
    unsigned int i = 0;
    for (; i < num_bytes >> 3; ++i) {
        diff = src0[0] - points[i];

        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);

        target[i] = sq_dist;
    }
}

#endif /*LV_HAVE_GENERIC*/

#endif /*INCLUDED_volk_32fc_x2_square_dist_32f_a_H*/
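/* Example usage via the VOLK dispatcher (a minimal sketch; assumes the
 * generated volk_32fc_x2_square_dist_32f entry point and <volk/volk.h>):
 *
 *   unsigned int N = 64;
 *   size_t alignment = volk_get_alignment();
 *   lv_32fc_t* points = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t) * N, alignment);
 *   lv_32fc_t* src0 = (lv_32fc_t*)volk_malloc(sizeof(lv_32fc_t), alignment);
 *   float* target = (float*)volk_malloc(sizeof(float) * N, alignment);
 *   // ... fill src0[0] and points[0..N-1] ...
 *   volk_32fc_x2_square_dist_32f(target, src0, points, N);
 *   // target[i] now holds |src0[0] - points[i]|^2
 *   volk_free(target);
 *   volk_free(points);
 *   volk_free(src0);
 */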
#ifndef INCLUDED_volk_32fc_x2_square_dist_32f_u_H
#define INCLUDED_volk_32fc_x2_square_dist_32f_u_H

#include <inttypes.h>
#include <volk/volk_complex.h>
#ifdef LV_HAVE_AVX2
#include <immintrin.h>
static inline void volk_32fc_x2_square_dist_32f_u_avx2(float* target,
                                                       lv_32fc_t* src0,
                                                       lv_32fc_t* points,
                                                       unsigned int num_points)
{
    const unsigned int num_bytes = num_points * 8;

    __m128 xmm0, xmm9;
    __m256 xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;

    lv_32fc_t diff;
    float sq_dist;
    int bound = num_bytes >> 6;               /* full 8-point iterations */
    int leftovers1 = (num_bytes >> 3) & 0b11; /* 0-3 scalar points left */
    int i = 0;
    __m256i idx = _mm256_set_epi32(7, 6, 3, 2, 5, 4, 1, 0);
    xmm1 = _mm256_setzero_ps();
    xmm0 = _mm_loadu_ps((float*)src0);
    xmm0 = _mm_permute_ps(xmm0, 0b01000100); /* (re, im, re, im) of src0[0] */
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 0);
    xmm1 = _mm256_insertf128_ps(xmm1, xmm0, 1);
    for (; i < bound; ++i) {
        xmm2 = _mm256_loadu_ps((float*)&points[0]);
        xmm3 = _mm256_loadu_ps((float*)&points[4]);
        points += 8;
        xmm4 = _mm256_sub_ps(xmm1, xmm2);
        xmm5 = _mm256_sub_ps(xmm1, xmm3);
        xmm6 = _mm256_mul_ps(xmm4, xmm4);
        xmm7 = _mm256_mul_ps(xmm5, xmm5);
        xmm4 = _mm256_hadd_ps(xmm6, xmm7);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        _mm256_storeu_ps(target, xmm4);

        target += 8;
    }
    if (num_bytes >> 5 & 1) {
        xmm2 = _mm256_loadu_ps((float*)&points[0]);

        xmm4 = _mm256_sub_ps(xmm1, xmm2);

        points += 4;
        xmm6 = _mm256_mul_ps(xmm4, xmm4);

        xmm4 = _mm256_hadd_ps(xmm6, xmm6);
        xmm4 = _mm256_permutevar8x32_ps(xmm4, idx);

        xmm9 = _mm256_extractf128_ps(xmm4, 1);
        _mm_storeu_ps(target, xmm9);

        target += 4;
    }
    for (i = 0; i < leftovers1; ++i) {
        diff = src0[0] - points[0];

        points += 1;

        sq_dist = lv_creal(diff) * lv_creal(diff) + lv_cimag(diff) * lv_cimag(diff);

        target[0] = sq_dist;
        target += 1;
    }
}

#endif /*LV_HAVE_AVX2*/

#endif /*INCLUDED_volk_32fc_x2_square_dist_32f_u_H*/
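/* The unaligned kernel mirrors the aligned AVX2 version; the differences are
 * the loadu/storeu intrinsics and a single scalar tail loop over 0-3 points
 * in place of the aligned version's separate 2-point and 1-point tails. */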