diff --git a/lib/include/srslte/phy/utils/simd.h b/lib/include/srslte/phy/utils/simd.h
index 35bcf1492..024de72e4 100644
--- a/lib/include/srslte/phy/utils/simd.h
+++ b/lib/include/srslte/phy/utils/simd.h
@@ -2044,7 +2044,7 @@ static inline simd_b_t srslte_simd_b_sub(simd_b_t a, simd_b_t b)
 #endif /* LV_HAVE_AVX512 */
 }
 
-static inline simd_s_t srslte_simd_b_neg(simd_b_t a, simd_b_t b)
+static inline simd_b_t srslte_simd_b_neg(simd_b_t a, simd_b_t b)
 {
 #ifdef LV_HAVE_AVX512
   __m256i a0 = _mm512_extracti64x4_epi64(a, 0);
diff --git a/lib/src/phy/utils/vector_simd.c b/lib/src/phy/utils/vector_simd.c
index 3127c3a66..9b0100511 100644
--- a/lib/src/phy/utils/vector_simd.c
+++ b/lib/src/phy/utils/vector_simd.c
@@ -258,19 +258,19 @@ void srslte_vec_neg_bbb_simd(const int8_t* x, const int8_t* y, int8_t* z, const
 #if SRSLTE_SIMD_B_SIZE
   if (SRSLTE_IS_ALIGNED(x) && SRSLTE_IS_ALIGNED(y) && SRSLTE_IS_ALIGNED(z)) {
     for (; i < len - SRSLTE_SIMD_B_SIZE + 1; i += SRSLTE_SIMD_B_SIZE) {
-      simd_s_t a = srslte_simd_b_load(&x[i]);
-      simd_s_t b = srslte_simd_b_load(&y[i]);
+      simd_b_t a = srslte_simd_b_load(&x[i]);
+      simd_b_t b = srslte_simd_b_load(&y[i]);
 
-      simd_s_t r = srslte_simd_b_neg(a, b);
+      simd_b_t r = srslte_simd_b_neg(a, b);
 
       srslte_simd_b_store(&z[i], r);
     }
   } else {
     for (; i < len - SRSLTE_SIMD_B_SIZE + 1; i += SRSLTE_SIMD_B_SIZE) {
-      simd_s_t a = srslte_simd_b_loadu(&x[i]);
-      simd_s_t b = srslte_simd_b_loadu(&y[i]);
+      simd_b_t a = srslte_simd_b_loadu(&x[i]);
+      simd_b_t b = srslte_simd_b_loadu(&y[i]);
 
-      simd_s_t r = srslte_simd_b_neg(a, b);
+      simd_b_t r = srslte_simd_b_neg(a, b);
 
       srslte_simd_b_storeu(&z[i], r);
     }