Fix some static analysis warnings under lib/src/phy/fec

master
Xavier Arteaga 4 years ago committed by Andre Puschmann
parent fafa03f7c8
commit efd6c569b8

@ -84,7 +84,7 @@ void* create_ldpc_dec_c(uint8_t bgN, uint8_t bgM, uint16_t ls, float scaling_fct
uint16_t liftN = bgN * ls;
uint16_t hrrN = (bgK + 4) * ls;
if ((vp = malloc(sizeof(struct ldpc_regs_c))) == NULL) {
if ((vp = SRSRAN_MEM_ALLOC(struct ldpc_regs_c, 1)) == NULL) {
return NULL;
}
@ -168,13 +168,13 @@ int init_ldpc_dec_c(void* p, const int8_t* llrs, uint16_t ls)
return -1;
}
bzero(vp->soft_bits, skip * sizeof(int8_t));
srsran_vec_i8_zero(vp->soft_bits, skip);
for (i = skip; i < vp->liftN; i++) {
vp->soft_bits[i] = llrs[i - skip];
}
bzero(vp->check_to_var, (vp->hrrN + vp->ls) * vp->bgM * sizeof(int8_t));
bzero(vp->var_to_check, (vp->hrrN + vp->ls) * sizeof(int8_t));
srsran_vec_i8_zero(vp->check_to_var, (vp->hrrN + vp->ls) * (uint32_t)vp->bgM);
srsran_vec_i8_zero(vp->var_to_check, vp->hrrN + vp->ls);
return 0;
}
@ -249,7 +249,7 @@ int update_ldpc_check_to_var_c(void* p,
vp->prod_v2c[index] *= (vp->var_to_check[i_v2c] >= 0) ? 1 : -1;
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
int8_t* this_check_to_var = vp->check_to_var + i_layer * (vp->hrrN + vp->ls);
@ -268,7 +268,7 @@ int update_ldpc_check_to_var_c(void* p,
this_check_to_var[i_v2c] *= vp->prod_v2c[index] * ((vp->var_to_check[i_v2c] >= 0) ? 1 : -1);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -305,7 +305,7 @@ int update_ldpc_soft_bits_c(void* p, int i_layer, const int8_t (*these_var_indic
}
vp->soft_bits[i_bit] = (int8_t)tmp;
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -47,8 +47,8 @@
* \brief Represents a node of the base factor graph.
*/
typedef union bg_node_t {
int8_t c[SRSRAN_AVX2_B_SIZE]; /*!< Each base node may contain up to \ref SRSRAN_AVX2_B_SIZE lifted nodes. */
__m256i v; /*!< All the lifted nodes of the current base node as a 256-bit line. */
int8_t* c; /*!< Each base node may contain up to \ref SRSRAN_AVX2_B_SIZE lifted nodes. */
__m256i* v; /*!< All the lifted nodes of the current base node as a 256-bit line. */
} bg_node_t;
/*!
@ -63,10 +63,10 @@ static const int8_t infinity7 = (1U << 6U) - 1;
struct ldpc_regs_c_avx2 {
__m256i scaling_fctr; /*!< \brief Scaling factor for the normalized min-sum decoding algorithm. */
bg_node_t* soft_bits; /*!< \brief A-posteriori log-likelihood ratios. */
__m256i* check_to_var; /*!< \brief Check-to-variable messages. */
__m256i* var_to_check; /*!< \brief Variable-to-check messages. */
__m256i* rotated_v2c; /*!< \brief To store a rotated version of the variable-to-check messages. */
bg_node_t soft_bits; /*!< \brief A-posteriori log-likelihood ratios. */
__m256i* check_to_var; /*!< \brief Check-to-variable messages. */
__m256i* var_to_check; /*!< \brief Variable-to-check messages. */
__m256i* rotated_v2c; /*!< \brief To store a rotated version of the variable-to-check messages. */
uint16_t ls; /*!< \brief Lifting size. */
uint8_t hrr; /*!< \brief Number of variable nodes in the high-rate region (before lifting). */
@ -145,33 +145,28 @@ void* create_ldpc_dec_c_avx2(uint8_t bgN, uint8_t bgM, uint16_t ls, float scalin
uint8_t bgK = bgN - bgM;
uint16_t hrr = bgK + 4;
if ((vp = srsran_vec_malloc(sizeof(struct ldpc_regs_c_avx2))) == NULL) {
if ((vp = SRSRAN_MEM_ALLOC(struct ldpc_regs_c_avx2, 1)) == NULL) {
return NULL;
}
SRSRAN_MEM_ZERO(vp, struct ldpc_regs_c_avx2, 1);
if ((vp->soft_bits = srsran_vec_malloc(bgN * sizeof(bg_node_t))) == NULL) {
free(vp);
if ((vp->soft_bits.v = SRSRAN_MEM_ALLOC(__m256i, bgN)) == NULL) {
delete_ldpc_dec_c_avx2(vp);
return NULL;
}
if ((vp->check_to_var = srsran_vec_malloc((hrr + 1) * bgM * sizeof(__m256i))) == NULL) {
free(vp->soft_bits);
free(vp);
if ((vp->check_to_var = SRSRAN_MEM_ALLOC(__m256i, (hrr + 1) * (uint32_t)bgM)) == NULL) {
delete_ldpc_dec_c_avx2(vp);
return NULL;
}
if ((vp->var_to_check = srsran_vec_malloc((hrr + 1) * sizeof(__m256i))) == NULL) {
free(vp->check_to_var);
free(vp->soft_bits);
free(vp);
if ((vp->var_to_check = SRSRAN_MEM_ALLOC(__m256i, hrr + 1)) == NULL) {
delete_ldpc_dec_c_avx2(vp);
return NULL;
}
if ((vp->rotated_v2c = srsran_vec_malloc((hrr + 1) * sizeof(__m256i))) == NULL) {
free(vp->var_to_check);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp);
if ((vp->rotated_v2c = SRSRAN_MEM_ALLOC(__m256i, hrr + 1)) == NULL) {
delete_ldpc_dec_c_avx2(vp);
return NULL;
}
@ -190,13 +185,22 @@ void delete_ldpc_dec_c_avx2(void* p)
{
struct ldpc_regs_c_avx2* vp = p;
if (vp != NULL) {
if (vp == NULL) {
return;
}
if (vp->rotated_v2c) {
free(vp->rotated_v2c);
}
if (vp->var_to_check) {
free(vp->var_to_check);
}
if (vp->check_to_var) {
free(vp->check_to_var);
free(vp->soft_bits);
free(vp);
}
if (vp->soft_bits.v) {
free(vp->soft_bits.v);
}
free(vp);
}
int init_ldpc_dec_c_avx2(void* p, const int8_t* llrs, uint16_t ls)
@ -210,17 +214,17 @@ int init_ldpc_dec_c_avx2(void* p, const int8_t* llrs, uint16_t ls)
}
// the first 2 x LS bits of the codeword are not sent
vp->soft_bits[0].v = _mm256_set1_epi8(0);
vp->soft_bits[1].v = _mm256_set1_epi8(0);
vp->soft_bits.v[0] = _mm256_set1_epi8(0);
vp->soft_bits.v[1] = _mm256_set1_epi8(0);
for (i = 2; i < vp->bgN; i++) {
for (j = 0; j < ls; j++) {
vp->soft_bits[i].c[j] = llrs[(i - 2) * ls + j];
vp->soft_bits.c[i * SRSRAN_AVX2_B_SIZE + j] = llrs[(i - 2) * ls + j];
}
bzero(&(vp->soft_bits[i].c[ls]), (SRSRAN_AVX2_B_SIZE - ls) * sizeof(int8_t));
SRSRAN_MEM_ZERO(&(vp->soft_bits.c[i * SRSRAN_AVX2_B_SIZE + ls]), int8_t, SRSRAN_AVX2_B_SIZE - ls);
}
bzero(vp->check_to_var, (vp->hrr + 1) * vp->bgM * sizeof(__m256i));
bzero(vp->var_to_check, (vp->hrr + 1) * sizeof(__m256i));
SRSRAN_MEM_ZERO(vp->check_to_var, __m256i, (vp->hrr + 1) * (uint32_t)vp->bgM);
SRSRAN_MEM_ZERO(vp->var_to_check, __m256i, vp->hrr + 1);
return 0;
}
@ -235,15 +239,12 @@ int update_ldpc_var_to_check_c_avx2(void* p, int i_layer)
__m256i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1);
// Update the high-rate region.
inner_var_to_check_c_avx2(&(vp->soft_bits[0].v), this_check_to_var, vp->var_to_check, infinity7, vp->hrr);
inner_var_to_check_c_avx2(vp->soft_bits.v, this_check_to_var, vp->var_to_check, infinity7, vp->hrr);
if (i_layer >= 4) {
// Update the extension region.
inner_var_to_check_c_avx2(&(vp->soft_bits[0].v) + vp->hrr + i_layer - 4,
this_check_to_var + vp->hrr,
vp->var_to_check + vp->hrr,
infinity7,
1);
inner_var_to_check_c_avx2(
vp->soft_bits.v + vp->hrr + i_layer - 4, this_check_to_var + vp->hrr, vp->var_to_check + vp->hrr, infinity7, 1);
}
return 0;
@ -304,7 +305,7 @@ int update_ldpc_check_to_var_c_avx2(void* p,
mask_min_epi8 = _mm256_cmpgt_epi8(mins_v2c_epi8, this_abs_v2c_epi8);
mins_v2c_epi8 = _mm256_blendv_epi8(mins_v2c_epi8, help_min_epi8, mask_min_epi8);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m256i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1);
@ -333,7 +334,7 @@ int update_ldpc_check_to_var_c_avx2(void* p,
this_check_to_var[i_v2c_base] = rotate_node_left(this_c2v_epi8, shift, vp->ls);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -366,9 +367,9 @@ int update_ldpc_soft_bits_c_avx2(void* p, int i_layer, const int8_t (*these_var_
// tmp = (tmp < -infty7) : -infty8 ? tmp
mask_epi8 = _mm256_cmpgt_epi8(neg_infty7_epi8, tmp_epi8);
vp->soft_bits[current_var_index].v = _mm256_blendv_epi8(tmp_epi8, neg_infty8_epi8, mask_epi8);
vp->soft_bits.v[current_var_index] = _mm256_blendv_epi8(tmp_epi8, neg_infty8_epi8, mask_epi8);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -386,7 +387,7 @@ int extract_ldpc_message_c_avx2(void* p, uint8_t* message, uint16_t liftK)
for (int i = 0; i < liftK / vp->ls; i++) {
for (j = 0; j < vp->ls; j++) {
message[i * vp->ls + j] = (vp->soft_bits[i].c[j] < 0);
message[i * vp->ls + j] = (vp->soft_bits.c[i * SRSRAN_AVX2_B_SIZE + j] < 0);
}
}

@ -47,8 +47,8 @@
* \brief Represents a node of the base factor graph.
*/
typedef union bg_node_t {
int8_t c[SRSRAN_AVX2_B_SIZE]; /*!< Each base node may contain up to \ref SRSRAN_AVX2_B_SIZE lifted nodes. */
__m256i v; /*!< All the lifted nodes of the current base node as a 256-bit line. */
int8_t* c; /*!< Each base node may contain up to \ref SRSRAN_AVX2_B_SIZE lifted nodes. */
__m256i* v; /*!< All the lifted nodes of the current base node as a 256-bit line. */
} bg_node_t;
/*!
@ -63,11 +63,11 @@ static const int8_t infinity7 = (1U << 6U) - 1;
struct ldpc_regs_c_avx2_flood {
__m256i scaling_fctr; /*!< \brief Scaling factor for the normalized min-sum decoding algorithm. */
bg_node_t* soft_bits; /*!< \brief A-posteriori log-likelihood ratios. */
__m256i* llrs; /*!< \brief A-priori log-likelihood ratios. */
__m256i* check_to_var; /*!< \brief Check-to-variable messages. */
__m256i* var_to_check; /*!< \brief Variable-to-check messages. */
__m256i* rotated_v2c; /*!< \brief To store a rotated version of the variable-to-check messages. */
bg_node_t soft_bits; /*!< \brief A-posteriori log-likelihood ratios. */
__m256i* llrs; /*!< \brief A-priori log-likelihood ratios. */
__m256i* check_to_var; /*!< \brief Check-to-variable messages. */
__m256i* var_to_check; /*!< \brief Variable-to-check messages. */
__m256i* rotated_v2c; /*!< \brief To store a rotated version of the variable-to-check messages. */
uint16_t ls; /*!< \brief Lifting size. */
uint8_t hrr; /*!< \brief Number of variable nodes in the high-rate region (before lifting). */
@ -146,42 +146,34 @@ void* create_ldpc_dec_c_avx2_flood(uint8_t bgN, uint8_t bgM, uint16_t ls, float
uint8_t bgK = bgN - bgM;
uint16_t hrr = bgK + 4;
if ((vp = srsran_vec_malloc(sizeof(struct ldpc_regs_c_avx2_flood))) == NULL) {
if ((vp = SRSRAN_MEM_ALLOC(struct ldpc_regs_c_avx2_flood, 1)) == NULL) {
return NULL;
}
SRSRAN_MEM_ZERO(vp, struct ldpc_regs_c_avx2_flood, 1);
if ((vp->llrs = srsran_vec_malloc(bgN * sizeof(__m256i))) == NULL) {
free(vp);
if ((vp->llrs = SRSRAN_MEM_ALLOC(__m256i, bgN)) == NULL) {
delete_ldpc_dec_c_avx2_flood(vp);
return NULL;
}
if ((vp->soft_bits = srsran_vec_malloc(bgN * sizeof(bg_node_t))) == NULL) {
free(vp->llrs);
free(vp);
if ((vp->soft_bits.v = SRSRAN_MEM_ALLOC(__m256i, bgN)) == NULL) {
delete_ldpc_dec_c_avx2_flood(vp);
return NULL;
}
if ((vp->check_to_var = srsran_vec_malloc((hrr + 1) * bgM * sizeof(__m256i))) == NULL) {
free(vp->soft_bits);
free(vp->llrs);
free(vp);
uint32_t sz = (uint32_t)(hrr + 1) * (uint32_t)bgM;
if ((vp->check_to_var = SRSRAN_MEM_ALLOC(__m256i, sz)) == NULL) {
delete_ldpc_dec_c_avx2_flood(vp);
return NULL;
}
if ((vp->var_to_check = srsran_vec_malloc((hrr + 1) * bgM * sizeof(__m256i))) == NULL) {
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->var_to_check = SRSRAN_MEM_ALLOC(__m256i, sz)) == NULL) {
delete_ldpc_dec_c_avx2_flood(vp);
return NULL;
}
if ((vp->rotated_v2c = srsran_vec_malloc((hrr + 1) * sizeof(__m256i))) == NULL) {
free(vp->var_to_check);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->rotated_v2c = SRSRAN_MEM_ALLOC(__m256i, hrr + 1)) == NULL) {
delete_ldpc_dec_c_avx2_flood(vp);
return NULL;
}
@ -200,14 +192,25 @@ void delete_ldpc_dec_c_avx2_flood(void* p)
{
struct ldpc_regs_c_avx2_flood* vp = p;
if (vp != NULL) {
if (vp == NULL) {
return;
}
if (vp->rotated_v2c) {
free(vp->rotated_v2c);
}
if (vp->var_to_check) {
free(vp->var_to_check);
}
if (vp->check_to_var) {
free(vp->check_to_var);
free(vp->soft_bits);
}
if (vp->soft_bits.v) {
free(vp->soft_bits.v);
}
if (vp->llrs) {
free(vp->llrs);
free(vp);
}
free(vp);
}
int init_ldpc_dec_c_avx2_flood(void* p, const int8_t* llrs, uint16_t ls)
@ -221,20 +224,20 @@ int init_ldpc_dec_c_avx2_flood(void* p, const int8_t* llrs, uint16_t ls)
}
// the first 2 x LS bits of the codeword are not sent
vp->soft_bits[0].v = _mm256_set1_epi8(0);
vp->soft_bits[1].v = _mm256_set1_epi8(0);
vp->soft_bits.v[0] = _mm256_set1_epi8(0);
vp->soft_bits.v[1] = _mm256_set1_epi8(0);
vp->llrs[0] = _mm256_set1_epi8(0);
vp->llrs[1] = _mm256_set1_epi8(0);
for (i = 2; i < vp->bgN; i++) {
for (j = 0; j < ls; j++) {
vp->soft_bits[i].c[j] = llrs[(i - 2) * ls + j];
vp->soft_bits.c[i * SRSRAN_AVX2_B_SIZE + j] = llrs[(i - 2) * ls + j];
}
bzero(&(vp->soft_bits[i].c[ls]), (SRSRAN_AVX2_B_SIZE - ls) * sizeof(int8_t));
vp->llrs[i] = vp->soft_bits[i].v;
srsran_vec_i8_zero(&(vp->soft_bits.c[i * SRSRAN_AVX2_B_SIZE + ls]), SRSRAN_AVX2_B_SIZE - ls);
vp->llrs[i] = vp->soft_bits.v[i];
}
bzero(vp->check_to_var, (vp->hrr + 1) * vp->bgM * sizeof(__m256i));
bzero(vp->var_to_check, (vp->hrr + 1) * vp->bgM * sizeof(__m256i));
SRSRAN_MEM_ZERO(vp->check_to_var, __m256i, (vp->hrr + 1) * (uint32_t)vp->bgM);
SRSRAN_MEM_ZERO(vp->var_to_check, __m256i, (vp->hrr + 1) * (uint32_t)vp->bgM);
return 0;
}
@ -250,11 +253,11 @@ int update_ldpc_var_to_check_c_avx2_flood(void* p, int i_layer)
__m256i* this_var_to_check = vp->var_to_check + i_layer * (vp->hrr + 1);
// Update the high-rate region.
inner_var_to_check_c_avx2(&(vp->soft_bits[0].v), this_check_to_var, this_var_to_check, infinity7, vp->hrr);
inner_var_to_check_c_avx2(&(vp->soft_bits.v[0]), this_check_to_var, this_var_to_check, infinity7, vp->hrr);
if (i_layer >= 4) {
// Update the extension region.
inner_var_to_check_c_avx2(&(vp->soft_bits[0].v) + vp->hrr + i_layer - 4,
inner_var_to_check_c_avx2(&(vp->soft_bits.v[0]) + vp->hrr + i_layer - 4,
this_check_to_var + vp->hrr,
this_var_to_check + vp->hrr,
infinity7,
@ -319,7 +322,7 @@ int update_ldpc_check_to_var_c_avx2_flood(void* p,
mask_min_epi8 = _mm256_cmpgt_epi8(mins_v2c_epi8, this_abs_v2c_epi8);
mins_v2c_epi8 = _mm256_blendv_epi8(mins_v2c_epi8, help_min_epi8, mask_min_epi8);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m256i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1);
@ -348,7 +351,7 @@ int update_ldpc_check_to_var_c_avx2_flood(void* p,
this_check_to_var[i_v2c_base] = rotate_node_left(this_c2v_epi8, shift, vp->ls);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -372,7 +375,7 @@ int update_ldpc_soft_bits_c_avx2_flood(void* p, const int8_t (*these_var_indices
__m256i mask_epi8;
for (i = 0; i < vp->bgN; i++) {
vp->soft_bits[i].v = vp->llrs[i];
vp->soft_bits.v[i] = vp->llrs[i];
}
for (i_layer = 0; i_layer < vp->bgM; i_layer++) {
@ -382,7 +385,7 @@ int update_ldpc_soft_bits_c_avx2_flood(void* p, const int8_t (*these_var_indices
for (i = 0; (current_var_index != -1) && (i < MAX_CNCT); i++) {
i_bit_tmp_base = (current_var_index <= vp->hrr) ? current_var_index : vp->hrr;
tmp_epi8 = _mm256_adds_epi8(this_check_to_var[i_bit_tmp_base], vp->soft_bits[current_var_index].v);
tmp_epi8 = _mm256_adds_epi8(this_check_to_var[i_bit_tmp_base], vp->soft_bits.v[current_var_index]);
// tmp = (tmp > infty7) : infty8 ? tmp
mask_epi8 = _mm256_cmpgt_epi8(tmp_epi8, infty7_epi8);
@ -390,7 +393,7 @@ int update_ldpc_soft_bits_c_avx2_flood(void* p, const int8_t (*these_var_indices
// tmp = (tmp < -infty7) : -infty8 ? tmp
mask_epi8 = _mm256_cmpgt_epi8(neg_infty7_epi8, tmp_epi8);
vp->soft_bits[current_var_index].v = _mm256_blendv_epi8(tmp_epi8, neg_infty8_epi8, mask_epi8);
vp->soft_bits.v[current_var_index] = _mm256_blendv_epi8(tmp_epi8, neg_infty8_epi8, mask_epi8);
current_var_index = these_var_indices[i_layer][i + 1];
}
@ -411,7 +414,7 @@ int extract_ldpc_message_c_avx2_flood(void* p, uint8_t* message, uint16_t liftK)
for (int i = 0; i < liftK / vp->ls; i++) {
for (j = 0; j < vp->ls; j++) {
message[i * vp->ls + j] = (vp->soft_bits[i].c[j] < 0);
message[i * vp->ls + j] = (vp->soft_bits.c[i * SRSRAN_AVX2_B_SIZE + j] < 0);
}
}

@ -359,7 +359,7 @@ int update_ldpc_check_to_var_c_avx2long(void* p,
vp->mins_v2c_epi8[j] = _mm256_blendv_epi8(vp->mins_v2c_epi8[j], help_min_epi8, mask_min_epi8);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m256i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1) * vp->n_subnodes;
@ -394,7 +394,7 @@ int update_ldpc_check_to_var_c_avx2long(void* p,
// rotating right LS - shift positions is the same as rotating left shift positions
rotate_node_right(vp->this_c2v_epi8, this_check_to_var + i_v2c_base, vp->ls - shift, vp->ls, vp->n_subnodes);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -436,7 +436,7 @@ int update_ldpc_soft_bits_c_avx2long(void* p, int i_layer, const int8_t (*these_
vp->soft_bits[current_var_index_subnode + j].v = _mm256_blendv_epi8(tmp_epi8, neg_infty8_epi8, mask_epi8);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -128,107 +128,63 @@ void* create_ldpc_dec_c_avx2long_flood(uint8_t bgN, uint8_t bgM, uint16_t ls, fl
uint8_t bgK = bgN - bgM;
uint16_t hrr = bgK + 4;
if ((vp = srsran_vec_malloc(sizeof(struct ldpc_regs_c_avx2long_flood))) == NULL) {
if ((vp = SRSRAN_MEM_ALLOC(struct ldpc_regs_c_avx2long_flood, 1)) == NULL) {
return NULL;
}
SRSRAN_MEM_ZERO(vp, struct ldpc_regs_c_avx2long_flood, 1);
// compute number of subnodes
int left_out = ls % SRSRAN_AVX2_B_SIZE;
int n_subnodes = ls / SRSRAN_AVX2_B_SIZE + (left_out > 0);
if ((vp->llrs = srsran_vec_malloc(bgN * n_subnodes * sizeof(__m256i))) == NULL) {
free(vp);
if ((vp->llrs = SRSRAN_MEM_ALLOC(__m256i, bgN * n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->soft_bits = srsran_vec_malloc(bgN * n_subnodes * sizeof(bg_node_t))) == NULL) {
free(vp->llrs);
free(vp);
if ((vp->soft_bits = SRSRAN_MEM_ALLOC(bg_node_t, bgN * n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->check_to_var = srsran_vec_malloc((long)(hrr + 1) * bgM * n_subnodes * sizeof(__m256i))) == NULL) {
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->check_to_var = SRSRAN_MEM_ALLOC(__m256i, (hrr + 1) * (uint32_t)bgM * n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->var_to_check_to_free = srsran_vec_malloc(((hrr + 1) * bgM * n_subnodes + 2) * sizeof(__m256i))) == NULL) {
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->var_to_check_to_free = SRSRAN_MEM_ALLOC(__m256i, (hrr + 1) * (uint32_t)bgM * n_subnodes + 2)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
vp->var_to_check = &vp->var_to_check_to_free[1];
if ((vp->minp_v2c_epi8 = srsran_vec_malloc(n_subnodes * sizeof(__m256i))) == NULL) {
free(vp->var_to_check_to_free);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->minp_v2c_epi8 = SRSRAN_MEM_ALLOC(__m256i, n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->mins_v2c_epi8 = srsran_vec_malloc(n_subnodes * sizeof(__m256i))) == NULL) {
free(vp->minp_v2c_epi8);
free(vp->var_to_check_to_free);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->mins_v2c_epi8 = SRSRAN_MEM_ALLOC(__m256i, n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->prod_v2c_epi8 = srsran_vec_malloc(n_subnodes * sizeof(__m256i))) == NULL) {
free(vp->mins_v2c_epi8);
free(vp->minp_v2c_epi8);
free(vp->var_to_check_to_free);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->prod_v2c_epi8 = SRSRAN_MEM_ALLOC(__m256i, n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->min_ix_epi8 = srsran_vec_malloc(n_subnodes * sizeof(__m256i))) == NULL) {
free(vp->prod_v2c_epi8);
free(vp->mins_v2c_epi8);
free(vp->minp_v2c_epi8);
free(vp->var_to_check_to_free);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->min_ix_epi8 = SRSRAN_MEM_ALLOC(__m256i, n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->rotated_v2c = srsran_vec_malloc((long)(hrr + 1) * n_subnodes * sizeof(__m256i))) == NULL) {
free(vp->min_ix_epi8);
free(vp->prod_v2c_epi8);
free(vp->mins_v2c_epi8);
free(vp->minp_v2c_epi8);
free(vp->var_to_check_to_free);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->rotated_v2c = SRSRAN_MEM_ALLOC(__m256i, (hrr + 1) * (uint32_t)n_subnodes)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
if ((vp->this_c2v_epi8_to_free = srsran_vec_malloc((n_subnodes + 2) * sizeof(__m256i))) == NULL) {
free(vp->rotated_v2c);
free(vp->min_ix_epi8);
free(vp->prod_v2c_epi8);
free(vp->mins_v2c_epi8);
free(vp->minp_v2c_epi8);
free(vp->var_to_check_to_free);
free(vp->check_to_var);
free(vp->soft_bits);
free(vp->llrs);
free(vp);
if ((vp->this_c2v_epi8_to_free = SRSRAN_MEM_ALLOC(__m256i, n_subnodes + 2)) == NULL) {
delete_ldpc_dec_c_avx2long_flood(vp);
return NULL;
}
vp->this_c2v_epi8 = &vp->this_c2v_epi8_to_free[1];
@ -250,19 +206,40 @@ void delete_ldpc_dec_c_avx2long_flood(void* p)
{
struct ldpc_regs_c_avx2long_flood* vp = p;
if (vp != NULL) {
if (vp == NULL) {
return;
}
if (vp->this_c2v_epi8_to_free) {
free(vp->this_c2v_epi8_to_free);
}
if (vp->rotated_v2c) {
free(vp->rotated_v2c);
}
if (vp->min_ix_epi8) {
free(vp->min_ix_epi8);
}
if (vp->prod_v2c_epi8) {
free(vp->prod_v2c_epi8);
}
if (vp->mins_v2c_epi8) {
free(vp->mins_v2c_epi8);
}
if (vp->minp_v2c_epi8) {
free(vp->minp_v2c_epi8);
}
if (vp->var_to_check_to_free) {
free(vp->var_to_check_to_free);
}
if (vp->check_to_var) {
free(vp->check_to_var);
}
if (vp->soft_bits) {
free(vp->soft_bits);
}
if (vp->llrs) {
free(vp->llrs);
free(vp);
}
free(vp);
}
int init_ldpc_dec_c_avx2long_flood(void* p, const int8_t* llrs, uint16_t ls)
@ -293,8 +270,8 @@ int init_ldpc_dec_c_avx2long_flood(void* p, const int8_t* llrs, uint16_t ls)
bzero((int8_t*)(vp->llrs + i * vp->n_subnodes + j - 1) + k, (SRSRAN_AVX2_B_SIZE - k) * sizeof(int8_t));
}
bzero(vp->check_to_var, (vp->hrr + 1) * vp->bgM * vp->n_subnodes * sizeof(__m256i));
bzero(vp->var_to_check, (vp->hrr + 1) * vp->bgM * vp->n_subnodes * sizeof(__m256i));
SRSRAN_MEM_ZERO(vp->check_to_var, __m256i, (vp->hrr + 1) * (uint32_t)vp->bgM * (uint32_t)vp->n_subnodes);
SRSRAN_MEM_ZERO(vp->var_to_check, __m256i, (vp->hrr + 1) * (uint32_t)vp->bgM * (uint32_t)vp->n_subnodes);
return 0;
}
@ -387,7 +364,7 @@ int update_ldpc_check_to_var_c_avx2long_flood(void* p,
vp->mins_v2c_epi8[j] = _mm256_blendv_epi8(vp->mins_v2c_epi8[j], help_min_epi8, mask_min_epi8);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m256i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1) * vp->n_subnodes;
@ -419,7 +396,7 @@ int update_ldpc_check_to_var_c_avx2long_flood(void* p,
// rotating right LS - shift positions is the same as rotating left shift positions
rotate_node_right(vp->this_c2v_epi8, this_check_to_var + i_v2c_base, vp->ls - shift, vp->ls, vp->n_subnodes);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -315,7 +315,7 @@ int update_ldpc_check_to_var_c_avx512(void* p,
mask_min_epi8 = _mm512_cmpgt_epi8_mask(mins_v2c_epi8, this_abs_v2c_epi8);
mins_v2c_epi8 = _mm512_mask_blend_epi8(mask_min_epi8, mins_v2c_epi8, help_min_epi8);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m512i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1);
@ -347,7 +347,7 @@ int update_ldpc_check_to_var_c_avx512(void* p,
// rotating right LS - shift positions is the same as rotating left shift positions
rotate_node_right((uint8_t*)vp->this_c2v_epi8, this_check_to_var + i_v2c_base, (vp->ls - shift) % vp->ls, vp->ls);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -381,7 +381,7 @@ int update_ldpc_soft_bits_c_avx512(void* p, int i_layer, const int8_t (*these_va
vp->soft_bits.v[current_var_index] = _mm512_mask_blend_epi8(mask_epi8, tmp_epi8, _mm512_neg_infty8_epi8);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -382,7 +382,7 @@ int update_ldpc_check_to_var_c_avx512long(void* p,
vp->mins_v2c_epi8[j] = _mm512_mask_blend_epi8(mask_min_epi8, vp->mins_v2c_epi8[j], help_min_epi8);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m512i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1) * vp->n_subnodes;
@ -417,7 +417,7 @@ int update_ldpc_check_to_var_c_avx512long(void* p,
// rotating right LS - shift positions is the same as rotating left shift positions
rotate_node_right((uint8_t*)vp->this_c2v_epi8, this_check_to_var + i_v2c_base, (vp->ls - shift) % vp->ls, vp->ls);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -460,7 +460,7 @@ int update_ldpc_soft_bits_c_avx512long(void* p, int i_layer, const int8_t (*thes
_mm512_mask_blend_epi8(mask_epi8, tmp_epi8, _mm512_neg_infty8_epi8);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -293,8 +293,8 @@ int init_ldpc_dec_c_avx512long_flood(void* p, const int8_t* llrs, uint16_t ls)
bzero((int8_t*)(vp->llrs + i * vp->n_subnodes + j - 1) + k, (SRSRAN_AVX512_B_SIZE - k) * sizeof(int8_t));
}
bzero(vp->check_to_var, (vp->hrr + 1) * vp->bgM * vp->n_subnodes * sizeof(__m512i));
bzero(vp->var_to_check, (vp->hrr + 1) * vp->bgM * vp->n_subnodes * sizeof(__m512i));
SRSRAN_MEM_ZERO(vp->check_to_var, __m512i, (vp->hrr + 1) * (uint32_t)vp->bgM * (uint32_t)vp->n_subnodes);
SRSRAN_MEM_ZERO(vp->var_to_check, __m512i, (vp->hrr + 1) * (uint32_t)vp->bgM * (uint32_t)vp->n_subnodes);
return 0;
}
@ -384,7 +384,7 @@ int update_ldpc_check_to_var_c_avx512long_flood(void* p,
vp->mins_v2c_epi8[j] = _mm512_mask_blend_epi8(mask_min_epi8, vp->mins_v2c_epi8[j], help_min_epi8);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
__m512i* this_check_to_var = vp->check_to_var + i_layer * (vp->hrr + 1) * vp->n_subnodes;
@ -418,7 +418,7 @@ int update_ldpc_check_to_var_c_avx512long_flood(void* p,
// rotating right LS - shift positions is the same as rotating left shift positions
rotate_node_right((uint8_t*)vp->this_c2v_epi8, this_check_to_var + i_v2c_base, (vp->ls - shift) % vp->ls, vp->ls);
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -181,15 +181,15 @@ int init_ldpc_dec_c_flood(void* p, const int8_t* llrs, uint16_t ls)
return -1;
}
bzero(vp->llrs, skip * sizeof(int8_t));
bzero(vp->soft_bits, skip * sizeof(int8_t));
srsran_vec_i8_zero(vp->llrs, skip);
srsran_vec_i8_zero(vp->soft_bits, skip);
for (i = skip; i < vp->liftN; i++) {
vp->llrs[i] = llrs[i - skip];
vp->soft_bits[i] = llrs[i - skip];
}
bzero(vp->check_to_var, (vp->hrrN + vp->ls) * vp->bgM * sizeof(int8_t));
bzero(vp->var_to_check, (vp->hrrN + vp->ls) * vp->bgM * sizeof(int8_t));
srsran_vec_i8_zero(vp->check_to_var, (vp->hrrN + vp->ls) * (uint32_t)vp->bgM);
srsran_vec_i8_zero(vp->var_to_check, (vp->hrrN + vp->ls) * (uint32_t)vp->bgM);
return 0;
}
@ -267,7 +267,7 @@ int update_ldpc_check_to_var_c_flood(void* p,
vp->prod_v2c[index] *= (this_var_to_check[i_v2c] >= 0) ? 1 : -1;
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
int8_t* this_check_to_var = vp->check_to_var + i_layer * (vp->hrrN + vp->ls);
@ -286,7 +286,7 @@ int update_ldpc_check_to_var_c_flood(void* p,
this_check_to_var[i_v2c] *= vp->prod_v2c[index] * ((this_var_to_check[i_v2c] >= 0) ? 1 : -1);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -147,8 +147,8 @@ int init_ldpc_dec_f(void* p, const float* llrs, uint16_t ls)
vp->soft_bits[i] = llrs[i - skip];
}
bzero(vp->check_to_var, (vp->hrrN + vp->ls) * vp->bgM * sizeof(float));
bzero(vp->var_to_check, (vp->hrrN + vp->ls) * sizeof(float));
srsran_vec_f_zero(vp->check_to_var, (vp->hrrN + vp->ls) * (uint32_t)vp->bgM);
srsran_vec_f_zero(vp->var_to_check, vp->hrrN + vp->ls);
return 0;
}
@ -223,7 +223,7 @@ int update_ldpc_check_to_var_f(void* p,
vp->prod_v2c[index] *= (vp->var_to_check[i_v2c] >= 0) ? 1 : -1;
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
float* this_check_to_var = vp->check_to_var + i_layer * (vp->hrrN + vp->ls);
@ -242,7 +242,7 @@ int update_ldpc_check_to_var_f(void* p,
this_check_to_var[i_v2c] *= (float)vp->prod_v2c[index] * ((vp->var_to_check[i_v2c] >= 0) ? 1.F : -1.F);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -271,7 +271,7 @@ int update_ldpc_soft_bits_f(void* p, int i_layer, const int8_t (*these_var_indic
vp->soft_bits[i_bit] = this_check_to_var[i_bit_tmp] + this_var_to_check[i_bit_tmp];
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -168,13 +168,13 @@ int init_ldpc_dec_s(void* p, const int16_t* llrs, uint16_t ls)
return -1;
}
bzero(vp->soft_bits, skip * sizeof(int16_t));
srsran_vec_i16_zero(vp->soft_bits, skip);
for (i = skip; i < vp->liftN; i++) {
vp->soft_bits[i] = llrs[i - skip];
}
bzero(vp->check_to_var, (vp->hrrN + vp->ls) * vp->bgM * sizeof(int16_t));
bzero(vp->var_to_check, (vp->hrrN + vp->ls) * sizeof(int16_t));
srsran_vec_i16_zero(vp->check_to_var, (vp->hrrN + vp->ls) * (uint32_t)vp->bgM);
srsran_vec_i16_zero(vp->var_to_check, vp->hrrN + vp->ls);
return 0;
}
@ -249,7 +249,7 @@ int update_ldpc_check_to_var_s(void* p,
vp->prod_v2c[index] *= (vp->var_to_check[i_v2c] >= 0) ? 1 : -1;
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
int16_t* this_check_to_var = vp->check_to_var + i_layer * (vp->hrrN + vp->ls);
@ -268,7 +268,7 @@ int update_ldpc_check_to_var_s(void* p,
this_check_to_var[i_v2c] *= vp->prod_v2c[index] * ((vp->var_to_check[i_v2c] >= 0) ? 1 : -1);
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;
@ -306,7 +306,7 @@ int update_ldpc_soft_bits_s(void* p, int i_layer, const int8_t (*these_var_indic
}
vp->soft_bits[i_bit] = (int16_t)tmp;
}
current_var_index = (*these_var_indices)[i + 1];
current_var_index = (*these_var_indices)[(i + 1) % MAX_CNCT];
}
return 0;

@ -136,7 +136,7 @@ int load_avx2long(void* p, const uint8_t* input, const uint8_t msg_len, const ui
ini = ini + node_size;
}
bzero(vp->codeword + msg_len * vp->n_subnodes, (cdwd_len - msg_len) * vp->n_subnodes * sizeof(__m256i));
SRSRAN_MEM_ZERO(vp->codeword + msg_len * vp->n_subnodes, bg_node_t, (cdwd_len - msg_len) * (uint32_t)vp->n_subnodes);
return 0;
}
@ -203,8 +203,8 @@ void preprocess_systematic_bits_avx2long(srsran_ldpc_encoder_t* q)
int N = q->bgN;
int K = q->bgK;
int M = q->bgM;
int ls = q->ls;
uint32_t M = q->bgM;
uint16_t* pcm = q->pcm;
int k = 0;
@ -214,7 +214,7 @@ void preprocess_systematic_bits_avx2long(srsran_ldpc_encoder_t* q)
__m256i tmp_epi8;
bzero(vp->aux, M * vp->n_subnodes * sizeof(__m256i));
SRSRAN_MEM_ZERO(vp->aux, __m256i, M * vp->n_subnodes);
// split the input message into K chunks of ls bits each and, for all chunks
for (k = 0; k < K; k++) {

@ -125,6 +125,12 @@ static int init_rm(srsran_ldpc_rm_t* p,
return -1;
}
// Protect zero modulo
if (mod_order == 0) {
ERROR("Invalid modulation order");
return -1;
}
// check out_len is multiple of mod_order
if ((E % mod_order) != 0) { // N can only be a multiple of either BASEN[0] or BASEN[1], but not both
ERROR("Wrong RM codeword length (E) = %d. It must be a multiple of modulation order = %d", E, mod_order);

@ -642,11 +642,23 @@ int main(int argc, char** argv)
void print_decoder(char* title, int n_batches, int n_errors, double elapsed_time)
{
double wer = NAN;
if (n_batches != 0 && batch_size != 0) {
wer = (double)n_errors / n_batches / batch_size;
}
printf("\n**** %s ****", title);
printf("\nEstimated word error rate:\n %e (%d errors)\n", (double)n_errors / n_batches / batch_size, n_errors);
printf("\nEstimated word error rate:\n %e (%d errors)\n", wer, n_errors);
double w_rate = NAN;
double k_rate = NAN;
double n_rate = NAN;
if (elapsed_time != 0) {
w_rate = n_batches * batch_size / elapsed_time;
k_rate = n_batches * batch_size * finalK / elapsed_time;
n_rate = n_batches * batch_size * finalN / elapsed_time;
}
printf("Estimated throughput decoder:\n %e word/s\n %e bit/s (information)\n %e bit/s (encoded)\n",
n_batches * batch_size / elapsed_time,
n_batches * batch_size * finalK / elapsed_time,
n_batches * batch_size * finalN / elapsed_time);
w_rate,
k_rate,
n_rate);
}

@ -95,7 +95,7 @@ void get_examples(uint8_t* messages, //
sprintf(cstr, "ls%dcwds", lift_size);
do {
do {
tmp[0] = fgetc(ex_file);
tmp[0] = (char)fgetc(ex_file);
} while (tmp[0] != 'l');
fscanf(ex_file, "%[^\n]", tmp + 1);
fgetc(ex_file); // discard newline

@ -25,6 +25,7 @@
#include "polar_decoder_ssc_c_avx2.h"
#include "../utils_avx2.h"
#include "polar_decoder_vector_avx2.h"
#include "srsran/phy/fec/polar/polar_code.h"
#include "srsran/phy/fec/polar/polar_encoder.h"
#include "srsran/phy/utils/vector.h"
@ -42,13 +43,13 @@ struct StateAVX2 {
* \brief Describes an SSC polar decoder (8-bit version).
*/
struct pSSC_c_avx2 {
int8_t** llr0; /*!< \brief Pointers to the upper half of LLRs values at all stages. */
int8_t** llr1; /*!< \brief Pointers to the lower half of LLRs values at all stages. */
uint8_t* est_bit; /*!< \brief Pointers to the temporary estimated bits. */
struct Params* param; /*!< \brief Pointer to a Params structure. */
struct StateAVX2* state; /*!< \brief Pointer to a State. */
void* tmp_node_type; /*!< \brief Pointer to a Tmp_node_type. */
srsran_polar_encoder_t* enc; /*!< \brief Pointer to a srsran_polar_encoder_t. */
int8_t* llr0[NMAX_LOG + 1]; /*!< \brief Pointers to the upper half of LLRs values at all stages. */
int8_t* llr1[NMAX_LOG + 1]; /*!< \brief Pointers to the lower half of LLRs values at all stages. */
uint8_t* est_bit; /*!< \brief Pointers to the temporary estimated bits. */
struct Params* param; /*!< \brief Pointer to a Params structure. */
struct StateAVX2* state; /*!< \brief Pointer to a State. */
void* tmp_node_type; /*!< \brief Pointer to a Tmp_node_type. */
srsran_polar_encoder_t* enc; /*!< \brief Pointer to a srsran_polar_encoder_t. */
void (*f)(const int8_t* x, const int8_t* y, int8_t* z, const uint16_t len); /*!< \brief Pointer to the function-f. */
void (*g)(const uint8_t* b,
const int8_t* x,
@ -100,12 +101,6 @@ void delete_polar_decoder_ssc_c_avx2(void* p)
if (pp->llr0[0]) {
free(pp->llr0[0]); // remove LLR buffer.
}
if (pp->llr0) {
free(pp->llr0);
}
if (pp->llr1) {
free(pp->llr1);
}
if (pp->param) {
if (pp->param->node_type[0]) {
free(pp->param->node_type[0]);
@ -191,10 +186,6 @@ void* create_polar_decoder_ssc_c_avx2(const uint8_t nMax)
pp->est_bit = srsran_vec_u8_malloc(est_bit_size); // every 32 chars are aligned
// allocate memory for LLR pointers.
pp->llr0 = malloc((nMax + 1) * sizeof(int8_t*));
pp->llr1 = malloc((nMax + 1) * sizeof(int8_t*));
// LLR MEMORY NOT ALIGNED FOR LLR_BUFFERS_SIZE < SRSRAN_SIMB_LLR_ALIGNED
// We do not align the memory at lower stages, as if done, after each function f and function g
@ -303,7 +294,6 @@ int init_polar_decoder_ssc_c_avx2(void* p,
int polar_decoder_ssc_c_avx2(void* p, uint8_t* data_decoded)
{
if (p == NULL) {
return -1;
}
@ -322,7 +312,6 @@ int polar_decoder_ssc_c_avx2(void* p, uint8_t* data_decoded)
static void simplified_node(struct pSSC_c_avx2* p)
{
struct pSSC_c_avx2* pp = p;
pp->state->stage--; // to child node.
@ -336,7 +325,6 @@ static void simplified_node(struct pSSC_c_avx2* p)
uint16_t stage_half_size = 0;
switch (pp->param->node_type[stage][bit_pos]) {
case RATE_1:
pp->hard_bit(pp->llr0[stage], pp->est_bit + pp->state->bit_pos, stage_size);

@ -120,7 +120,6 @@ static inline void srsran_vec_polar_encoder_32_avx2(const uint8_t* x, uint8_t* z
*/
static inline void srsran_vec_xor_bbb_avx2(const uint8_t* x, const uint8_t* y, uint8_t* z, uint16_t len)
{
for (int i = 0; i < len; i += SRSRAN_AVX2_B_SIZE) {
__m256i simd_x = _mm256_loadu_si256((__m256i*)&x[i]);
__m256i simd_y = _mm256_loadu_si256((__m256i*)&y[i]);
@ -133,19 +132,18 @@ static inline void srsran_vec_xor_bbb_avx2(const uint8_t* x, const uint8_t* y, u
int polar_encoder_encode_avx2(void* p, const uint8_t* input, uint8_t* output, const uint8_t code_size_log)
{
struct pAVX2* q = p;
if (q == NULL) {
return -1;
}
uint8_t* tmp = q->tmp;
uint8_t* x = NULL;
uint8_t* y = NULL;
uint8_t* z = NULL;
if (q == NULL) {
return -1;
}
// load data
uint32_t code_size = 1U << code_size_log;

@ -42,6 +42,16 @@ void srsran_polar_code_sets_free(srsran_polar_sets_t* c)
}
}
#define SAFE_READ(PTR, SIZE, N, FILE) \
do { \
size_t nbytes = SIZE * N; \
if (nbytes != fread(PTR, SIZE, N, FILE)) { \
perror("read"); \
fclose(FILE); \
exit(1); \
} \
} while (false)
int srsran_polar_code_sets_read(srsran_polar_sets_t* c,
const uint16_t message_size,
const uint8_t code_size_log,
@ -100,10 +110,10 @@ int srsran_polar_code_sets_read(srsran_polar_sets_t* c,
exit(1);
}
fread(c->info_set, sizeof(uint16_t), c->info_set_size, fptr);
fread(c->message_set, sizeof(uint16_t), c->message_set_size, fptr);
fread(c->parity_set, sizeof(uint16_t), c->parity_set_size, fptr);
fread(c->frozen_set, sizeof(uint16_t), c->frozen_set_size, fptr);
SAFE_READ(c->info_set, sizeof(uint16_t), c->info_set_size, fptr);
SAFE_READ(c->message_set, sizeof(uint16_t), c->message_set_size, fptr);
SAFE_READ(c->parity_set, sizeof(uint16_t), c->parity_set_size, fptr);
SAFE_READ(c->frozen_set, sizeof(uint16_t), c->frozen_set_size, fptr);
fclose(fptr);
return 0;

@ -61,7 +61,11 @@ int main(int argc, char** argv)
uint32_t st = 0, end = 187;
if (long_cb) {
st = srsran_cbsegm_cbindex(long_cb);
int n = srsran_cbsegm_cbindex(long_cb);
if (n < SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
st = (uint32_t)n;
end = st;
}

@ -111,7 +111,6 @@ int main(int argc, char** argv)
short* llr_s;
uint8_t* llr_c;
uint8_t * data_tx, *data_rx, *data_rx_bytes, *symbols;
uint32_t i, j;
float var[SNR_POINTS];
uint32_t snr_points;
uint32_t errors = 0;
@ -131,7 +130,11 @@ int main(int argc, char** argv)
if (test_known_data) {
frame_length = KNOWN_DATA_LEN;
} else {
frame_length = srsran_cbsegm_cbsize(srsran_cbsegm_cbindex(frame_length));
int n = srsran_cbsegm_cbsize(srsran_cbsegm_cbindex(frame_length));
if (n < SRSRAN_SUCCESS) {
return SRSRAN_ERROR;
}
frame_length = (uint32_t)n;
}
coded_length = 3 * (frame_length) + SRSRAN_TCOD_TOTALTAIL;
@ -200,7 +203,7 @@ int main(int argc, char** argv)
ebno_inc = (SNR_MAX - SNR_MIN) / SNR_POINTS;
if (ebno_db == 100.0) {
snr_points = SNR_POINTS;
for (i = 0; i < snr_points; i++) {
for (uint32_t i = 0; i < snr_points; i++) {
ebno_db = SNR_MIN + i * ebno_inc;
esno_db = ebno_db + srsran_convert_power_to_dB(1.0f / 3.0f);
var[i] = srsran_convert_dB_to_amplitude(-esno_db);
@ -210,13 +213,13 @@ int main(int argc, char** argv)
var[0] = srsran_convert_dB_to_amplitude(-esno_db);
snr_points = 1;
}
for (i = 0; i < snr_points; i++) {
for (uint32_t i = 0; i < snr_points; i++) {
mean_usec = 0;
errors = 0;
frame_cnt = 0;
while (frame_cnt < nof_frames) {
/* generate data_tx */
for (j = 0; j < frame_length; j++) {
for (uint32_t j = 0; j < frame_length; j++) {
if (test_known_data) {
data_tx[j] = known_data[j];
} else {
@ -226,19 +229,19 @@ int main(int argc, char** argv)
/* coded BER */
if (test_known_data) {
for (j = 0; j < coded_length; j++) {
for (uint32_t j = 0; j < coded_length; j++) {
symbols[j] = known_data_encoded[j];
}
} else {
srsran_tcod_encode(&tcod, data_tx, symbols, frame_length);
}
for (j = 0; j < coded_length; j++) {
for (uint32_t j = 0; j < coded_length; j++) {
llr[j] = symbols[j] ? 1 : -1;
}
srsran_ch_awgn_f(llr, llr, var[i], coded_length);
for (j = 0; j < coded_length; j++) {
for (uint32_t j = 0; j < coded_length; j++) {
llr_s[j] = (int16_t)(100 * llr[j]);
}

@ -452,7 +452,7 @@ static void tdec_iteration_8(srsran_tdec_t* h, int8_t* input)
if (h->dec_type == SRSRAN_TDEC_AUTO) {
h->current_llr_type = SRSRAN_TDEC_8;
h->current_dec = tdec_sb_idx_8(h->current_long_cb);
h->current_inter_idx = interleaver_idx(h->nof_blocks8[h->current_dec]);
h->current_inter_idx = interleaver_idx(h->nof_blocks8[h->current_dec % SRSRAN_TDEC_NOF_AUTO_MODES_8]);
// If long_cb is not multiple of any 8-bit decoder, use a 16-bit decoder and do type conversion
if (h->current_dec >= 10) {

Loading…
Cancel
Save